
move test and xml files

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/branches/HADOOP-4687/core@779196 13f79535-47bb-0310-9956-ffa450edef68
Giridharan Kesavan 16 years ago
parent
commit
bd524bd960
52 changed files with 0 additions and 10852 deletions
  1. 0 9
      src/test/hdfs-site.xml
  2. 0 103
      src/test/hdfs-with-mr/org/apache/hadoop/fs/AccumulatingReducer.java
  3. 0 551
      src/test/hdfs-with-mr/org/apache/hadoop/fs/DFSCIOTest.java
  4. 0 353
      src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java
  5. 0 129
      src/test/hdfs-with-mr/org/apache/hadoop/fs/IOMapperBase.java
  6. 0 853
      src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java
  7. 0 445
      src/test/hdfs-with-mr/org/apache/hadoop/fs/TestDFSIO.java
  8. 0 629
      src/test/hdfs-with-mr/org/apache/hadoop/fs/TestFileSystem.java
  9. 0 213
      src/test/hdfs-with-mr/org/apache/hadoop/fs/TestHarFileSystem.java
  10. 0 964
      src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java
  11. 0 344
      src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBenchWithoutMR.java
  12. 0 603
      src/test/hdfs-with-mr/org/apache/hadoop/io/FileBench.java
  13. 0 98
      src/test/hdfs-with-mr/org/apache/hadoop/io/TestSequenceFileMergeProgress.java
  14. 0 197
      src/test/hdfs-with-mr/org/apache/hadoop/ipc/TestSocketFactory.java
  15. 0 152
      src/test/hdfs-with-mr/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java
  16. 0 46
      src/test/hdfs-with-mr/org/apache/hadoop/test/AllTestDriver.java
  17. 0 75
      src/test/hdfs-with-mr/org/apache/hadoop/test/HdfsWithMRTestDriver.java
  18. 0 221
      src/test/hdfs-with-mr/org/apache/hadoop/tools/TestDistCh.java
  19. 0 18
      src/test/mapred-site.xml
  20. 0 404
      src/webapps/datanode/browseBlock.jsp
  21. 0 192
      src/webapps/datanode/browseDirectory.jsp
  22. 0 135
      src/webapps/datanode/tail.jsp
  23. 0 280
      src/webapps/hdfs/dfshealth.jsp
  24. 0 276
      src/webapps/hdfs/dfsnodelist.jsp
  25. 0 35
      src/webapps/hdfs/index.html
  26. 0 77
      src/webapps/hdfs/nn_browsedfscontent.jsp
  27. 0 269
      src/webapps/job/analysejobhistory.jsp
  28. 0 35
      src/webapps/job/index.html
  29. 0 80
      src/webapps/job/jobblacklistedtrackers.jsp
  30. 0 71
      src/webapps/job/jobconf.jsp
  31. 0 75
      src/webapps/job/jobconf_history.jsp
  32. 0 400
      src/webapps/job/jobdetails.jsp
  33. 0 280
      src/webapps/job/jobdetailshistory.jsp
  34. 0 187
      src/webapps/job/jobfailures.jsp
  35. 0 324
      src/webapps/job/jobhistory.jsp
  36. 0 89
      src/webapps/job/jobqueue_details.jsp
  37. 0 154
      src/webapps/job/jobtasks.jsp
  38. 0 88
      src/webapps/job/jobtaskshistory.jsp
  39. 0 173
      src/webapps/job/jobtracker.jsp
  40. 0 68
      src/webapps/job/loadhistory.jsp
  41. 0 138
      src/webapps/job/machines.jsp
  42. 0 292
      src/webapps/job/taskdetails.jsp
  43. 0 125
      src/webapps/job/taskdetailshistory.jsp
  44. 0 106
      src/webapps/job/taskstats.jsp
  45. 0 29
      src/webapps/secondary/index.html
  46. 0 39
      src/webapps/secondary/status.jsp
  47. BIN
      src/webapps/static/hadoop-logo.jpg
  48. 0 134
      src/webapps/static/hadoop.css
  49. 0 18
      src/webapps/static/jobconf.xsl
  50. 0 151
      src/webapps/static/jobtracker.js
  51. 0 17
      src/webapps/task/index.html
  52. 0 108
      src/webapps/task/tasktracker.jsp

+ 0 - 9
src/test/hdfs-site.xml

@@ -1,9 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-
-</configuration>

+ 0 - 103
src/test/hdfs-with-mr/org/apache/hadoop/fs/AccumulatingReducer.java

@@ -1,103 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.MapReduceBase;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reducer;
-import org.apache.hadoop.mapred.Reporter;
-
-/**
- * Reducer that accumulates values based on their type.
- * <p>
- * The type is specified in the key part of the key-value pair 
- * as a prefix to the key in the following way
- * <p>
- * <tt>type:key</tt>
- * <p>
- * The values are accumulated according to the types:
- * <ul>
- * <li><tt>s:</tt> - string, concatenate</li>
- * <li><tt>f:</tt> - float, sum</li>
- * <li><tt>l:</tt> - long, sum</li>
- * </ul>
- * 
- */
-public class AccumulatingReducer extends MapReduceBase
-    implements Reducer<Text, Text, Text, Text> {
-  static final String VALUE_TYPE_LONG = "l:";
-  static final String VALUE_TYPE_FLOAT = "f:";
-  static final String VALUE_TYPE_STRING = "s:";
-  private static final Log LOG = LogFactory.getLog(AccumulatingReducer.class);
-  
-  protected String hostName;
-  
-  public AccumulatingReducer () {
-    LOG.info("Starting AccumulatingReducer !!!");
-    try {
-      hostName = java.net.InetAddress.getLocalHost().getHostName();
-    } catch(Exception e) {
-      hostName = "localhost";
-    }
-    LOG.info("Starting AccumulatingReducer on " + hostName);
-  }
-  
-  public void reduce(Text key, 
-                     Iterator<Text> values,
-                     OutputCollector<Text, Text> output, 
-                     Reporter reporter
-                     ) throws IOException {
-    String field = key.toString();
-
-    reporter.setStatus("starting " + field + " ::host = " + hostName);
-
-    // concatenate strings
-    if (field.startsWith(VALUE_TYPE_STRING)) {
-      String sSum = "";
-      while (values.hasNext())
-        sSum += values.next().toString() + ";";
-      output.collect(key, new Text(sSum));
-      reporter.setStatus("finished " + field + " ::host = " + hostName);
-      return;
-    }
-    // sum float values
-    if (field.startsWith(VALUE_TYPE_FLOAT)) {
-      float fSum = 0;
-      while (values.hasNext())
-        fSum += Float.parseFloat(values.next().toString());
-      output.collect(key, new Text(String.valueOf(fSum)));
-      reporter.setStatus("finished " + field + " ::host = " + hostName);
-      return;
-    }
-    // sum long values
-    if (field.startsWith(VALUE_TYPE_LONG)) {
-      long lSum = 0;
-      while (values.hasNext()) {
-        lSum += Long.parseLong(values.next().toString());
-      }
-      output.collect(key, new Text(String.valueOf(lSum)));
-    }
-    reporter.setStatus("finished " + field + " ::host = " + hostName);
-  }
-}

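The type-prefix protocol above is easiest to see from the producer side. As a minimal sketch (hypothetical, not part of the deleted sources), a mapper feeding AccumulatingReducer would emit keys like this:

// Hypothetical producer for AccumulatingReducer; illustration only.
package org.apache.hadoop.fs;

import java.io.IOException;

import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class TypedStatsMapper extends MapReduceBase
    implements Mapper<Text, LongWritable, Text, Text> {

  public void map(Text file, LongWritable size,
                  OutputCollector<Text, Text> output,
                  Reporter reporter) throws IOException {
    // "l:" values are summed as longs by the reducer
    output.collect(new Text("l:tasks"), new Text("1"));
    output.collect(new Text("l:size"), new Text(String.valueOf(size.get())));
    // "f:" values are summed as floats
    output.collect(new Text("f:rate"), new Text("12.5"));
    // "s:" values are concatenated, ';'-separated
    output.collect(new Text("s:files"), new Text(file.toString()));
  }
}
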
+ 0 - 551
src/test/hdfs-with-mr/org/apache/hadoop/fs/DFSCIOTest.java

@@ -1,551 +0,0 @@
- /**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.PrintStream;
-import java.util.Date;
-import java.util.StringTokenizer;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.mapred.*;
-
-/**
- * Distributed i/o benchmark.
- * <p>
- * This test writes into or reads from a specified number of files.
- * File size is specified as a parameter to the test. 
- * Each file is accessed in a separate map task.
- * <p>
- * The reducer collects the following statistics:
- * <ul>
- * <li>number of tasks completed</li>
- * <li>number of bytes written/read</li>
- * <li>execution time</li>
- * <li>io rate</li>
- * <li>io rate squared</li>
- * </ul>
- *    
- * Finally, the following information is appended to a local file
- * <ul>
- * <li>read or write test</li>
- * <li>date and time the test finished</li>   
- * <li>number of files</li>
- * <li>total number of bytes processed</li>
- * <li>throughput in mb/sec (total number of bytes / sum of processing times)</li>
- * <li>average i/o rate in mb/sec per file</li>
- * <li>standard i/o rate deviation</li>
- * </ul>
- */
-public class DFSCIOTest extends TestCase {
-  // Constants
-  private static final Log LOG = LogFactory.getLog(DFSCIOTest.class);
-  private static final int TEST_TYPE_READ = 0;
-  private static final int TEST_TYPE_WRITE = 1;
-  private static final int TEST_TYPE_CLEANUP = 2;
-  private static final int DEFAULT_BUFFER_SIZE = 1000000;
-  private static final String BASE_FILE_NAME = "test_io_";
-  private static final String DEFAULT_RES_FILE_NAME = "DFSCIOTest_results.log";
-  
-  private static Configuration fsConfig = new Configuration();
-  private static final long MEGA = 0x100000;
-  private static String TEST_ROOT_DIR = System.getProperty("test.build.data","/benchmarks/DFSCIOTest");
-  private static Path CONTROL_DIR = new Path(TEST_ROOT_DIR, "io_control");
-  private static Path WRITE_DIR = new Path(TEST_ROOT_DIR, "io_write");
-  private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read");
-  private static Path DATA_DIR = new Path(TEST_ROOT_DIR, "io_data");
-
-  private static Path HDFS_TEST_DIR = new Path("/tmp/DFSCIOTest");
-  private static String HDFS_LIB_VERSION = System.getProperty("libhdfs.version", "1");
-  private static String CHMOD = new String("chmod");
-  private static Path HDFS_SHLIB = new Path(HDFS_TEST_DIR + "/libhdfs.so." + HDFS_LIB_VERSION);
-  private static Path HDFS_READ = new Path(HDFS_TEST_DIR + "/hdfs_read");
-  private static Path HDFS_WRITE = new Path(HDFS_TEST_DIR + "/hdfs_write");
-
-  /**
-   * Run the test with default parameters.
-   * 
-   * @throws Exception
-   */
-  public void testIOs() throws Exception {
-    testIOs(10, 10);
-  }
-
-  /**
-   * Run the test with the specified parameters.
-   * 
-   * @param fileSize file size
-   * @param nrFiles number of files
-   * @throws IOException
-   */
-  public static void testIOs(int fileSize, int nrFiles)
-    throws IOException {
-
-    FileSystem fs = FileSystem.get(fsConfig);
-
-    createControlFile(fs, fileSize, nrFiles);
-    writeTest(fs);
-    readTest(fs);
-  }
-
-  private static void createControlFile(
-                                        FileSystem fs,
-                                        int fileSize, // in MB 
-                                        int nrFiles
-                                        ) throws IOException {
-    LOG.info("creating control file: "+fileSize+" mega bytes, "+nrFiles+" files");
-
-    fs.delete(CONTROL_DIR, true);
-
-    for(int i=0; i < nrFiles; i++) {
-      String name = getFileName(i);
-      Path controlFile = new Path(CONTROL_DIR, "in_file_" + name);
-      SequenceFile.Writer writer = null;
-      try {
-        writer = SequenceFile.createWriter(fs, fsConfig, controlFile,
-                                           Text.class, LongWritable.class,
-                                           CompressionType.NONE);
-        writer.append(new Text(name), new LongWritable(fileSize));
-      } catch(Exception e) {
-        throw new IOException(e.getLocalizedMessage());
-      } finally {
-    	if (writer != null)
-          writer.close();
-    	writer = null;
-      }
-    }
-    LOG.info("created control files for: "+nrFiles+" files");
-  }
-
-  private static String getFileName(int fIdx) {
-    return BASE_FILE_NAME + Integer.toString(fIdx);
-  }
-  
-  /**
-   * Write/Read mapper base class.
-   * <p>
-   * Collects the following statistics per task:
-   * <ul>
-   * <li>number of tasks completed</li>
-   * <li>number of bytes written/read</li>
-   * <li>execution time</li>
-   * <li>i/o rate</li>
-   * <li>i/o rate squared</li>
-   * </ul>
-   */
-  private abstract static class IOStatMapper extends IOMapperBase {
-    IOStatMapper() { 
-      super(fsConfig);
-    }
-    
-    void collectStats(OutputCollector<Text, Text> output, 
-                      String name,
-                      long execTime, 
-                      Object objSize) throws IOException {
-      long totalSize = ((Long)objSize).longValue();
-      float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
-      LOG.info("Number of bytes processed = " + totalSize);
-      LOG.info("Exec time = " + execTime);
-      LOG.info("IO rate = " + ioRateMbSec);
-      
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"),
-          new Text(String.valueOf(1)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
-          new Text(String.valueOf(totalSize)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
-          new Text(String.valueOf(execTime)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
-          new Text(String.valueOf(ioRateMbSec*1000)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"),
-          new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000)));
-    }
-  }
-
-  /**
-   * Write mapper class.
-   */
-  public static class WriteMapper extends IOStatMapper {
-
-    public WriteMapper() { 
-      super(); 
-      for(int i=0; i < bufferSize; i++)
-        buffer[i] = (byte)('0' + i % 50);
-    }
-
-    public Object doIO(Reporter reporter, 
-                       String name, 
-                       long totalSize 
-                       ) throws IOException {
-      // create file
-      totalSize *= MEGA;
-      
-      // create instance of local filesystem 
-      FileSystem localFS = FileSystem.getLocal(fsConfig);
-      
-      try {
-        // native runtime
-        Runtime runTime = Runtime.getRuntime();
-          
-        // copy the dso and executable from dfs and chmod them
-        synchronized (this) {
-          localFS.delete(HDFS_TEST_DIR, true);
-          if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
-            throw new IOException("Failed to create " +	HDFS_TEST_DIR + " on local filesystem");
-          }
-        }
-        
-        synchronized (this) {
-          if (!localFS.exists(HDFS_SHLIB)) {
-            FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig);
-
-            String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB);
-            Process process = runTime.exec(chmodCmd);
-            int exitStatus = process.waitFor();
-            if (exitStatus != 0) {
-              throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
-            }
-          }
-        } 
-        
-        synchronized (this) {
-          if (!localFS.exists(HDFS_WRITE)) {
-            FileUtil.copy(fs, HDFS_WRITE, localFS, HDFS_WRITE, false, fsConfig);
-
-            String chmodCmd = new String(CHMOD + " a+x " + HDFS_WRITE); 
-            Process process = runTime.exec(chmodCmd);
-            int exitStatus = process.waitFor();
-            if (exitStatus != 0) {
-              throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
-            }
-          }
-        }
-    	  	  
-        // exec the C program
-        Path outFile = new Path(DATA_DIR, name);
-        String writeCmd = new String(HDFS_WRITE + " " + outFile + " " + totalSize + " " + bufferSize); 
-        Process process = runTime.exec(writeCmd, null, new File(HDFS_TEST_DIR.toString()));
-        int exitStatus = process.waitFor();
-        if (exitStatus != 0) {
-          throw new IOException(writeCmd + ": Failed with exitStatus: " + exitStatus);
-        }
-      } catch (InterruptedException interruptedException) {
-        reporter.setStatus(interruptedException.toString());
-      } finally {
-        localFS.close();
-      }
-      return new Long(totalSize);
-    }
-  }
-
-  private static void writeTest(FileSystem fs)
-    throws IOException {
-
-    fs.delete(DATA_DIR, true);
-    fs.delete(WRITE_DIR, true);
-    
-    runIOTest(WriteMapper.class, WRITE_DIR);
-  }
-  
-  private static void runIOTest( Class<? extends Mapper> mapperClass, 
-                                 Path outputDir
-                                 ) throws IOException {
-    JobConf job = new JobConf(fsConfig, DFSCIOTest.class);
-
-    FileInputFormat.setInputPaths(job, CONTROL_DIR);
-    job.setInputFormat(SequenceFileInputFormat.class);
-
-    job.setMapperClass(mapperClass);
-    job.setReducerClass(AccumulatingReducer.class);
-
-    FileOutputFormat.setOutputPath(job, outputDir);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Text.class);
-    job.setNumReduceTasks(1);
-    JobClient.runJob(job);
-  }
-
-  /**
-   * Read mapper class.
-   */
-  public static class ReadMapper extends IOStatMapper {
-
-    public ReadMapper() { 
-      super(); 
-    }
-
-    public Object doIO(Reporter reporter, 
-                       String name, 
-                       long totalSize 
-                       ) throws IOException {
-      totalSize *= MEGA;
-      
-      // create instance of local filesystem 
-      FileSystem localFS = FileSystem.getLocal(fsConfig);
-      
-      try {
-        // native runtime
-        Runtime runTime = Runtime.getRuntime();
-        
-        // copy the dso and executable from dfs
-        synchronized (this) {
-          localFS.delete(HDFS_TEST_DIR, true);
-          if (!(localFS.mkdirs(HDFS_TEST_DIR))) {
-            throw new IOException("Failed to create " +	HDFS_TEST_DIR + " on local filesystem");
-          }
-        }
-        
-        synchronized (this) {
-          if (!localFS.exists(HDFS_SHLIB)) {
-            if (!FileUtil.copy(fs, HDFS_SHLIB, localFS, HDFS_SHLIB, false, fsConfig)) {
-              throw new IOException("Failed to copy " + HDFS_SHLIB + " to local filesystem");
-            }
-
-            String chmodCmd = new String(CHMOD + " a+x " + HDFS_SHLIB);
-            Process process = runTime.exec(chmodCmd);
-            int exitStatus = process.waitFor();
-            if (exitStatus != 0) {
-              throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
-            }
-          }
-        }
-        
-        synchronized (this) {
-          if (!localFS.exists(HDFS_READ)) {
-            if (!FileUtil.copy(fs, HDFS_READ, localFS, HDFS_READ, false, fsConfig)) {
-              throw new IOException("Failed to copy " + HDFS_READ + " to local filesystem");
-            }
-
-            String chmodCmd = new String(CHMOD + " a+x " + HDFS_READ); 
-            Process process = runTime.exec(chmodCmd);
-            int exitStatus = process.waitFor();
-             
-            if (exitStatus != 0) {
-              throw new IOException(chmodCmd + ": Failed with exitStatus: " + exitStatus);
-            }
-          }
-        }
-    	  	  
-        // exec the C program
-        Path inFile = new Path(DATA_DIR, name);
-        String readCmd = new String(HDFS_READ + " " + inFile + " " + totalSize + " " + 
-                                    bufferSize); 
-        Process process = runTime.exec(readCmd, null, new File(HDFS_TEST_DIR.toString()));
-        int exitStatus = process.waitFor();
-        
-        if (exitStatus != 0) {
-          throw new IOException(HDFS_READ + ": Failed with exitStatus: " + exitStatus);
-        }
-      } catch (InterruptedException interruptedException) {
-        reporter.setStatus(interruptedException.toString());
-      } finally {
-        localFS.close();
-      }
-      return new Long(totalSize);
-    }
-  }
-
-  private static void readTest(FileSystem fs) throws IOException {
-    fs.delete(READ_DIR, true);
-    runIOTest(ReadMapper.class, READ_DIR);
-  }
-
-  private static void sequentialTest(
-                                     FileSystem fs, 
-                                     int testType, 
-                                     int fileSize, 
-                                     int nrFiles
-                                     ) throws Exception {
-    IOStatMapper ioer = null;
-    if (testType == TEST_TYPE_READ)
-      ioer = new ReadMapper();
-    else if (testType == TEST_TYPE_WRITE)
-      ioer = new WriteMapper();
-    else
-      return;
-    for(int i=0; i < nrFiles; i++)
-      ioer.doIO(Reporter.NULL,
-                BASE_FILE_NAME+Integer.toString(i), 
-                MEGA*fileSize);
-  }
-
-  public static void main(String[] args) {
-    int testType = TEST_TYPE_READ;
-    int bufferSize = DEFAULT_BUFFER_SIZE;
-    int fileSize = 1;
-    int nrFiles = 1;
-    String resFileName = DEFAULT_RES_FILE_NAME;
-    boolean isSequential = false;
-
-    String version="DFSCIOTest.0.0.1";
-    String usage = "Usage: DFSCIOTest -read | -write | -clean [-nrFiles N] [-fileSize MB] [-resFile resultFileName] [-bufferSize Bytes] ";
-    
-    System.out.println(version);
-    if (args.length == 0) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    for (int i = 0; i < args.length; i++) {       // parse command line
-      if (args[i].startsWith("-r")) {
-        testType = TEST_TYPE_READ;
-      } else if (args[i].startsWith("-w")) {
-        testType = TEST_TYPE_WRITE;
-      } else if (args[i].startsWith("-clean")) {
-        testType = TEST_TYPE_CLEANUP;
-      } else if (args[i].startsWith("-seq")) {
-        isSequential = true;
-      } else if (args[i].equals("-nrFiles")) {
-        nrFiles = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-fileSize")) {
-        fileSize = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-bufferSize")) {
-        bufferSize = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-resFile")) {
-        resFileName = args[++i];
-      }
-    }
-
-    LOG.info("nrFiles = " + nrFiles);
-    LOG.info("fileSize (MB) = " + fileSize);
-    LOG.info("bufferSize = " + bufferSize);
-  
-    try {
-      fsConfig.setInt("test.io.file.buffer.size", bufferSize);
-      FileSystem fs = FileSystem.get(fsConfig);
-      
-      if (testType != TEST_TYPE_CLEANUP) {
-        fs.delete(HDFS_TEST_DIR, true);
-        if (!fs.mkdirs(HDFS_TEST_DIR)) {
-          throw new IOException("Mkdirs failed to create " + 
-                                HDFS_TEST_DIR.toString());
-        }
-
-        //Copy the executables over to the remote filesystem
-        String hadoopHome = System.getenv("HADOOP_HOME");
-        fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/libhdfs.so." + HDFS_LIB_VERSION),
-                             HDFS_SHLIB);
-        fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_read"), HDFS_READ);
-        fs.copyFromLocalFile(new Path(hadoopHome + "/libhdfs/hdfs_write"), HDFS_WRITE);
-      }
-
-      if (isSequential) {
-        long tStart = System.currentTimeMillis();
-        sequentialTest(fs, testType, fileSize, nrFiles);
-        long execTime = System.currentTimeMillis() - tStart;
-        String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
-        LOG.info(resultLine);
-        return;
-      }
-      if (testType == TEST_TYPE_CLEANUP) {
-        cleanup(fs);
-        return;
-      }
-      createControlFile(fs, fileSize, nrFiles);
-      long tStart = System.currentTimeMillis();
-      if (testType == TEST_TYPE_WRITE)
-        writeTest(fs);
-      if (testType == TEST_TYPE_READ)
-        readTest(fs);
-      long execTime = System.currentTimeMillis() - tStart;
-    
-      analyzeResult(fs, testType, execTime, resFileName);
-    } catch(Exception e) {
-      System.err.print(e.getLocalizedMessage());
-      System.exit(-1);
-    }
-  }
-  
-  private static void analyzeResult( FileSystem fs, 
-                                     int testType,
-                                     long execTime,
-                                     String resFileName
-                                     ) throws IOException {
-    Path reduceFile;
-    if (testType == TEST_TYPE_WRITE)
-      reduceFile = new Path(WRITE_DIR, "part-00000");
-    else
-      reduceFile = new Path(READ_DIR, "part-00000");
-    DataInputStream in;
-    in = new DataInputStream(fs.open(reduceFile));
-  
-    BufferedReader lines;
-    lines = new BufferedReader(new InputStreamReader(in));
-    long tasks = 0;
-    long size = 0;
-    long time = 0;
-    float rate = 0;
-    float sqrate = 0;
-    String line;
-    while((line = lines.readLine()) != null) {
-      StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
-      String attr = tokens.nextToken(); 
-      if (attr.endsWith(":tasks"))
-        tasks = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith(":size"))
-        size = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith(":time"))
-        time = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith(":rate"))
-        rate = Float.parseFloat(tokens.nextToken());
-      else if (attr.endsWith(":sqrate"))
-        sqrate = Float.parseFloat(tokens.nextToken());
-    }
-    
-    double med = rate / 1000 / tasks;
-    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - med*med));
-    String resultLines[] = {
-      "----- DFSCIOTest ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" :
-                                     (testType == TEST_TYPE_READ) ? "read" : 
-                                     "unknown"),
-      "           Date & time: " + new Date(System.currentTimeMillis()),
-      "       Number of files: " + tasks,
-      "Total MBytes processed: " + size/MEGA,
-      "     Throughput mb/sec: " + size * 1000.0 / (time * MEGA),
-      "Average IO rate mb/sec: " + med,
-      " Std IO rate deviation: " + stdDev,
-      "    Test exec time sec: " + (float)execTime / 1000,
-      "" };
-
-    PrintStream res = new PrintStream(
-                                      new FileOutputStream(
-                                                           new File(resFileName), true)); 
-    for(int i = 0; i < resultLines.length; i++) {
-      LOG.info(resultLines[i]);
-      res.println(resultLines[i]);
-    }
-  }
-
-  private static void cleanup(FileSystem fs) throws Exception {
-    LOG.info("Cleaning up test files");
-    fs.delete(new Path(TEST_ROOT_DIR), true);
-    fs.delete(HDFS_TEST_DIR, true);
-  }
-}

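A note on the arithmetic in analyzeResult above: IOStatMapper emits each file's IO rate scaled by 1000 under "rate" and its square (also scaled by 1000) under "sqrate", so once the reducer has summed them, the mean and standard deviation fall out in a single pass via sqrt(E[x^2] - E[x]^2). A self-contained sketch of just that math, with names assumed from the deleted source:

// Sketch of the aggregate math in DFSCIOTest.analyzeResult; names assumed
// from the deleted source above, illustration only, not part of the commit.
public class RateStats {
  // rate   accumulates 1000 * sum(ioRateMbSec) over all files,
  // sqrate accumulates 1000 * sum(ioRateMbSec^2), as emitted by IOStatMapper.
  static double[] meanAndStdDev(double rate, double sqrate, long tasks) {
    double mean = rate / 1000 / tasks;     // "Average IO rate mb/sec"
    double meanSq = sqrate / 1000 / tasks; // mean of the squared rates
    // one-pass standard deviation: sqrt(E[x^2] - E[x]^2)
    double stdDev = Math.sqrt(Math.abs(meanSq - mean * mean));
    return new double[] { mean, stdDev };
  }

  public static void main(String[] args) {
    // e.g. two files at 40 and 60 MB/s: mean 50, stddev 10
    double rate = 1000 * (40.0 + 60.0);
    double sqrate = 1000 * (40.0 * 40.0 + 60.0 * 60.0);
    double[] r = meanAndStdDev(rate, sqrate, 2);
    System.out.println("mean=" + r[0] + " stddev=" + r[1]);
  }
}
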
+ 0 - 353
src/test/hdfs-with-mr/org/apache/hadoop/fs/DistributedFSCheck.java

@@ -1,353 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.PrintStream;
-import java.util.Date;
-import java.util.StringTokenizer;
-import java.util.TreeSet;
-import java.util.Vector;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.mapred.*;
-
-/**
- * Distributed checkup of the file system consistency.
- * <p>
- * Test file system consistency by reading each block of each file
- * of the specified file tree. 
- * Report corrupted blocks and general file statistics.
- * <p>
- * Optionally displays statistics on read performance.
- * 
- */
-public class DistributedFSCheck extends TestCase {
-  // Constants
-  private static final Log LOG = LogFactory.getLog(DistributedFSCheck.class);
-  private static final int TEST_TYPE_READ = 0;
-  private static final int TEST_TYPE_CLEANUP = 2;
-  private static final int DEFAULT_BUFFER_SIZE = 1000000;
-  private static final String DEFAULT_RES_FILE_NAME = "DistributedFSCheck_results.log";
-  private static final long MEGA = 0x100000;
-  
-  private static Configuration fsConfig = new Configuration();
-  private static Path TEST_ROOT_DIR = new Path(System.getProperty("test.build.data","/benchmarks/DistributedFSCheck"));
-  private static Path MAP_INPUT_DIR = new Path(TEST_ROOT_DIR, "map_input");
-  private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read");
-
-  private FileSystem fs;
-  private long nrFiles;
-  
-  DistributedFSCheck(Configuration conf) throws Exception {
-    fsConfig = conf;
-    this.fs = FileSystem.get(conf);
-  }
-
-  /**
-   * Run distributed checkup for the entire file system.
-   * 
-   * @throws Exception
-   */
-  public void testFSBlocks() throws Exception {
-    testFSBlocks("/");
-  }
-
-  /**
-   * Run distributed checkup for the specified directory.
-   * 
-   * @param rootName root directory name
-   * @throws Exception
-   */
-  public void testFSBlocks(String rootName) throws Exception {
-    createInputFile(rootName);
-    runDistributedFSCheck();
-    cleanup();  // clean up after all to restore the system state
-  }
-
-  private void createInputFile(String rootName) throws IOException {
-    cleanup();  // clean up if previous run failed
-
-    Path inputFile = new Path(MAP_INPUT_DIR, "in_file");
-    SequenceFile.Writer writer =
-      SequenceFile.createWriter(fs, fsConfig, inputFile, 
-                                Text.class, LongWritable.class, CompressionType.NONE);
-    
-    try {
-      nrFiles = 0;
-      listSubtree(new Path(rootName), writer);
-    } finally {
-      writer.close();
-    }
-    LOG.info("Created map input files.");
-  }
-  
-  private void listSubtree(Path rootFile,
-                           SequenceFile.Writer writer
-                           ) throws IOException {
-    FileStatus rootStatus = fs.getFileStatus(rootFile);
-    listSubtree(rootStatus, writer);
-  }
-
-  private void listSubtree(FileStatus rootStatus,
-                           SequenceFile.Writer writer
-                           ) throws IOException {
-    Path rootFile = rootStatus.getPath();
-    if (!rootStatus.isDir()) {
-      nrFiles++;
-      // For a regular file generate <fName,offset> pairs
-      long blockSize = fs.getDefaultBlockSize();
-      long fileLength = rootStatus.getLen();
-      for(long offset = 0; offset < fileLength; offset += blockSize)
-        writer.append(new Text(rootFile.toString()), new LongWritable(offset));
-      return;
-    }
-    
-    FileStatus children[] = fs.listStatus(rootFile);
-    if (children == null)
-      throw new IOException("Could not get listing for " + rootFile);
-    for (int i = 0; i < children.length; i++)
-      listSubtree(children[i], writer);
-  }
-
-  /**
-   * DistributedFSCheck mapper class.
-   */
-  public static class DistributedFSCheckMapper extends IOMapperBase {
-
-    public DistributedFSCheckMapper() { 
-      super(fsConfig); 
-    }
-
-    public Object doIO(Reporter reporter, 
-                       String name, 
-                       long offset 
-                       ) throws IOException {
-      // open file
-      FSDataInputStream in = null;
-      try {
-        in = fs.open(new Path(name));
-      } catch(IOException e) {
-        return name + "@(missing)";
-      }
-      in.seek(offset);
-      long actualSize = 0;
-      try {
-        long blockSize = fs.getDefaultBlockSize();
-        reporter.setStatus("reading " + name + "@" + 
-                           offset + "/" + blockSize);
-        for( int curSize = bufferSize; 
-             curSize == bufferSize && actualSize < blockSize;
-             actualSize += curSize) {
-          curSize = in.read(buffer, 0, bufferSize);
-        }
-      } catch(IOException e) {
-        LOG.info("Corrupted block detected in \"" + name + "\" at " + offset);
-        return name + "@" + offset;
-      } finally {
-        in.close();
-      }
-      return new Long(actualSize);
-    }
-    
-    void collectStats(OutputCollector<Text, Text> output, 
-                      String name, 
-                      long execTime, 
-                      Object corruptedBlock) throws IOException {
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "blocks"),
-          new Text(String.valueOf(1)));
-
-      if (corruptedBlock.getClass().getName().endsWith("String")) {
-        output.collect(
-            new Text(AccumulatingReducer.VALUE_TYPE_STRING + "badBlocks"),
-            new Text((String)corruptedBlock));
-        return;
-      }
-      long totalSize = ((Long)corruptedBlock).longValue();
-      float ioRateMbSec = (float)totalSize * 1000 / (execTime * 0x100000);
-      LOG.info("Number of bytes processed = " + totalSize);
-      LOG.info("Exec time = " + execTime);
-      LOG.info("IO rate = " + ioRateMbSec);
-      
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
-          new Text(String.valueOf(totalSize)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
-          new Text(String.valueOf(execTime)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
-          new Text(String.valueOf(ioRateMbSec*1000)));
-    }
-  }
-  
-  private void runDistributedFSCheck() throws Exception {
-    JobConf job = new JobConf(fs.getConf(), DistributedFSCheck.class);
-
-    FileInputFormat.setInputPaths(job, MAP_INPUT_DIR);
-    job.setInputFormat(SequenceFileInputFormat.class);
-
-    job.setMapperClass(DistributedFSCheckMapper.class);
-    job.setReducerClass(AccumulatingReducer.class);
-
-    FileOutputFormat.setOutputPath(job, READ_DIR);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Text.class);
-    job.setNumReduceTasks(1);
-    JobClient.runJob(job);
-  }
-
-  public static void main(String[] args) throws Exception {
-    int testType = TEST_TYPE_READ;
-    int bufferSize = DEFAULT_BUFFER_SIZE;
-    String resFileName = DEFAULT_RES_FILE_NAME;
-    String rootName = "/";
-    boolean viewStats = false;
-
-    String usage = "Usage: DistributedFSCheck [-root name] [-clean] [-resFile resultFileName] [-bufferSize Bytes] [-stats] ";
-    
-    if (args.length == 1 && args[0].startsWith("-h")) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    for(int i = 0; i < args.length; i++) {       // parse command line
-      if (args[i].equals("-root")) {
-        rootName = args[++i];
-      } else if (args[i].startsWith("-clean")) {
-        testType = TEST_TYPE_CLEANUP;
-      } else if (args[i].equals("-bufferSize")) {
-        bufferSize = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-resFile")) {
-        resFileName = args[++i];
-      } else if (args[i].startsWith("-stat")) {
-        viewStats = true;
-      }
-    }
-
-    LOG.info("root = " + rootName);
-    LOG.info("bufferSize = " + bufferSize);
-  
-    Configuration conf = new Configuration();  
-    conf.setInt("test.io.file.buffer.size", bufferSize);
-    DistributedFSCheck test = new DistributedFSCheck(conf);
-
-    if (testType == TEST_TYPE_CLEANUP) {
-      test.cleanup();
-      return;
-    }
-    test.createInputFile(rootName);
-    long tStart = System.currentTimeMillis();
-    test.runDistributedFSCheck();
-    long execTime = System.currentTimeMillis() - tStart;
-    
-    test.analyzeResult(execTime, resFileName, viewStats);
-    // test.cleanup();  // clean up after all to restore the system state
-  }
-  
-  private void analyzeResult(long execTime,
-                             String resFileName,
-                             boolean viewStats
-                             ) throws IOException {
-    Path reduceFile= new Path(READ_DIR, "part-00000");
-    DataInputStream in;
-    in = new DataInputStream(fs.open(reduceFile));
-  
-    BufferedReader lines;
-    lines = new BufferedReader(new InputStreamReader(in));
-    long blocks = 0;
-    long size = 0;
-    long time = 0;
-    float rate = 0;
-    StringTokenizer  badBlocks = null;
-    long nrBadBlocks = 0;
-    String line;
-    while((line = lines.readLine()) != null) {
-      StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
-      String attr = tokens.nextToken(); 
-      if (attr.endsWith("blocks"))
-        blocks = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith("size"))
-        size = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith("time"))
-        time = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith("rate"))
-        rate = Float.parseFloat(tokens.nextToken());
-      else if (attr.endsWith("badBlocks")) {
-        badBlocks = new StringTokenizer(tokens.nextToken(), ";");
-        nrBadBlocks = badBlocks.countTokens();
-      }
-    }
-    
-    Vector<String> resultLines = new Vector<String>();
-    resultLines.add( "----- DistributedFSCheck ----- : ");
-    resultLines.add( "               Date & time: " + new Date(System.currentTimeMillis()));
-    resultLines.add( "    Total number of blocks: " + blocks);
-    resultLines.add( "    Total number of  files: " + nrFiles);
-    resultLines.add( "Number of corrupted blocks: " + nrBadBlocks);
-    
-    int nrBadFilesPos = resultLines.size();
-    TreeSet<String> badFiles = new TreeSet<String>();
-    long nrBadFiles = 0;
-    if (nrBadBlocks > 0) {
-      resultLines.add("");
-      resultLines.add("----- Corrupted Blocks (file@offset) ----- : ");
-      while(badBlocks.hasMoreTokens()) {
-        String curBlock = badBlocks.nextToken();
-        resultLines.add(curBlock);
-        badFiles.add(curBlock.substring(0, curBlock.indexOf('@')));
-      }
-      nrBadFiles = badFiles.size();
-    }
-    
-    resultLines.insertElementAt(" Number of corrupted files: " + nrBadFiles, nrBadFilesPos);
-    
-    if (viewStats) {
-      resultLines.add("");
-      resultLines.add("-----   Performance  ----- : ");
-      resultLines.add("         Total MBytes read: " + size/MEGA);
-      resultLines.add("         Throughput mb/sec: " + (float)size * 1000.0 / (time * MEGA));
-      resultLines.add("    Average IO rate mb/sec: " + rate / 1000 / blocks);
-      resultLines.add("        Test exec time sec: " + (float)execTime / 1000);
-    }
-
-    PrintStream res = new PrintStream(
-                                      new FileOutputStream(
-                                                           new File(resFileName), true)); 
-    for(int i = 0; i < resultLines.size(); i++) {
-      String cur = resultLines.get(i);
-      LOG.info(cur);
-      res.println(cur);
-    }
-  }
-
-  private void cleanup() throws IOException {
-    LOG.info("Cleaning up test files");
-    fs.delete(TEST_ROOT_DIR, true);
-  }
-}

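One design point worth flagging in the mapper above: doIO signals a corrupted block by returning a String of the form "file@offset", while a successful read returns a Long byte count, and collectStats branches on the runtime type of that result. A minimal hypothetical sketch of the convention (illustration only, not part of the deleted sources):

// Hypothetical sketch of the result-type convention used between doIO and
// collectStats in DistributedFSCheckMapper: a String marks a corrupted
// block ("file@offset"), a Long carries the number of bytes actually read.
public class ResultTag {
  static String classify(Object doIOResult) {
    if (doIOResult instanceof String) {
      return "badBlock " + doIOResult;         // forwarded under "s:badBlocks"
    }
    return "bytesRead " + ((Long) doIOResult); // summed under "l:size"
  }

  public static void main(String[] args) {
    System.out.println(classify("part-0001@67108864")); // a corrupt block
    System.out.println(classify(Long.valueOf(67108864L)));
  }
}
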
+ 0 - 129
src/test/hdfs-with-mr/org/apache/hadoop/fs/IOMapperBase.java

@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.fs;
-
-import java.io.IOException;
-import java.net.InetAddress;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.Reporter;
-
-/**
- * Base mapper class for IO operations.
- * <p>
- * Two abstract methods, {@link #doIO(Reporter, String, long)} and
- * {@link #collectStats(OutputCollector,String,long,Object)}, should be
- * overridden in derived classes to define the IO operation and the
- * statistics data to be collected by subsequent reducers.
- * 
- */
-public abstract class IOMapperBase extends Configured
-    implements Mapper<Text, LongWritable, Text, Text> {
-  
-  protected byte[] buffer;
-  protected int bufferSize;
-  protected FileSystem fs;
-  protected String hostName;
-
-  public IOMapperBase(Configuration conf) { 
-    super(conf); 
-    try {
-      fs = FileSystem.get(conf);
-    } catch (Exception e) {
-      throw new RuntimeException("Cannot create file system.", e);
-    }
-    bufferSize = conf.getInt("test.io.file.buffer.size", 4096);
-    buffer = new byte[bufferSize];
-    try {
-      hostName = InetAddress.getLocalHost().getHostName();
-    } catch(Exception e) {
-      hostName = "localhost";
-    }
-  }
-
-  public void configure(JobConf job) {
-    setConf(job);
-  }
-
-  public void close() throws IOException {
-  }
-  
-  /**
-   * Perform io operation, usually read or write.
-   * 
-   * @param reporter
-   * @param name file name
-   * @param value offset within the file
-   * @return object that is passed as a parameter to 
-   *          {@link #collectStats(OutputCollector,String,long,Object)}
-   * @throws IOException
-   */
-  abstract Object doIO(Reporter reporter, 
-                       String name, 
-                       long value) throws IOException;
-
-  /**
-   * Collect stat data to be combined by a subsequent reducer.
-   * 
-   * @param output
-   * @param name file name
-   * @param execTime IO execution time
-   * @param doIOReturnValue value returned by {@link #doIO(Reporter,String,long)}
-   * @throws IOException
-   */
-  abstract void collectStats(OutputCollector<Text, Text> output, 
-                             String name, 
-                             long execTime, 
-                             Object doIOReturnValue) throws IOException;
-  
-  /**
-   * Map file name and offset into statistical data.
-   * <p>
-   * The map task is to get the 
-   * <tt>key</tt>, which contains the file name, and the 
-   * <tt>value</tt>, which is the offset within the file.
-   * 
-   * The parameters are passed to the abstract method 
-   * {@link #doIO(Reporter,String,long)}, which performs the io operation, 
-   * usually read or write data, and then 
-   * {@link #collectStats(OutputCollector,String,long,Object)} 
-   * is called to prepare stat data for a subsequent reducer.
-   */
-  public void map(Text key, 
-                  LongWritable value,
-                  OutputCollector<Text, Text> output, 
-                  Reporter reporter) throws IOException {
-    String name = key.toString();
-    long longValue = value.get();
-    
-    reporter.setStatus("starting " + name + " ::host = " + hostName);
-    
-    long tStart = System.currentTimeMillis();
-    Object statValue = doIO(reporter, name, longValue);
-    long tEnd = System.currentTimeMillis();
-    long execTime = tEnd - tStart;
-    collectStats(output, name, execTime, statValue);
-    
-    reporter.setStatus("finished " + name + " ::host = " + hostName);
-  }
-}

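The contract is clearest from a concrete subclass: doIO performs the timed operation and returns an arbitrary result object, which map() hands to collectStats together with the measured wall-clock time. A minimal hypothetical subclass (assumed names, not in this tree) might look like:

// Hypothetical minimal subclass of IOMapperBase, for illustration only.
// Lives in the same package because the abstract methods are package-private.
package org.apache.hadoop.fs;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;

public class NoopIOMapper extends IOMapperBase {

  public NoopIOMapper() {
    super(new Configuration());
  }

  // The timed operation: here it just pretends to process `value` bytes.
  Object doIO(Reporter reporter, String name, long value) throws IOException {
    return Long.valueOf(value);
  }

  // Turn the doIO return value and its wall-clock time into reducer input.
  void collectStats(OutputCollector<Text, Text> output, String name,
                    long execTime, Object result) throws IOException {
    output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
                   new Text(String.valueOf(execTime)));
    output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
                   new Text(String.valueOf((Long) result)));
  }
}
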
+ 0 - 853
src/test/hdfs-with-mr/org/apache/hadoop/fs/TestCopyFiles.java

@@ -1,853 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.ByteArrayOutputStream;
-import java.io.FileNotFoundException;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.net.URI;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-import java.util.StringTokenizer;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.tools.DistCp;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.log4j.Level;
-
-
-/**
- * A JUnit test for copying files recursively.
- */
-public class TestCopyFiles extends TestCase {
-  {
-    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
-        ).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)DistCp.LOG).getLogger().setLevel(Level.ALL);
-  }
-  
-  static final URI LOCAL_FS = URI.create("file:///");
-  
-  private static final Random RAN = new Random();
-  private static final int NFILES = 20;
-  private static String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp"))
-    .toString().replace(' ', '+');
-
-  /** class MyFile contains enough information to recreate the contents of
-   * a single file.
-   */
-  private static class MyFile {
-    private static Random gen = new Random();
-    private static final int MAX_LEVELS = 3;
-    private static final int MAX_SIZE = 8*1024;
-    private static String[] dirNames = {
-      "zero", "one", "two", "three", "four", "five", "six", "seven", "eight", "nine"
-    };
-    private final String name;
-    private int size = 0;
-    private long seed = 0L;
-
-    MyFile() {
-      this(gen.nextInt(MAX_LEVELS));
-    }
-    MyFile(int nLevels) {
-      String xname = "";
-      if (nLevels != 0) {
-        int[] levels = new int[nLevels];
-        for (int idx = 0; idx < nLevels; idx++) {
-          levels[idx] = gen.nextInt(10);
-        }
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < nLevels; idx++) {
-          sb.append(dirNames[levels[idx]]);
-          sb.append("/");
-        }
-        xname = sb.toString();
-      }
-      long fidx = gen.nextLong() & Long.MAX_VALUE;
-      name = xname + Long.toString(fidx);
-      reset();
-    }
-    void reset() {
-      final int oldsize = size;
-      do { size = gen.nextInt(MAX_SIZE); } while (oldsize == size);
-      final long oldseed = seed;
-      do { seed = gen.nextLong() & Long.MAX_VALUE; } while (oldseed == seed);
-    }
-    String getName() { return name; }
-    int getSize() { return size; }
-    long getSeed() { return seed; }
-  }
-
-  private static MyFile[] createFiles(URI fsname, String topdir)
-    throws IOException {
-    return createFiles(FileSystem.get(fsname, new Configuration()), topdir);
-  }
-
-  /** create NFILES with random names and directory hierarchies
-   * with random (but reproducible) data in them.
-   */
-  private static MyFile[] createFiles(FileSystem fs, String topdir)
-    throws IOException {
-    Path root = new Path(topdir);
-    MyFile[] files = new MyFile[NFILES];
-    for (int i = 0; i < NFILES; i++) {
-      files[i] = createFile(root, fs);
-    }
-    return files;
-  }
-
-  static MyFile createFile(Path root, FileSystem fs, int levels)
-      throws IOException {
-    MyFile f = levels < 0 ? new MyFile() : new MyFile(levels);
-    Path p = new Path(root, f.getName());
-    FSDataOutputStream out = fs.create(p);
-    byte[] toWrite = new byte[f.getSize()];
-    new Random(f.getSeed()).nextBytes(toWrite);
-    out.write(toWrite);
-    out.close();
-    FileSystem.LOG.info("created: " + p + ", size=" + f.getSize());
-    return f;
-  }
-
-  static MyFile createFile(Path root, FileSystem fs) throws IOException {
-    return createFile(root, fs, -1);
-  }
-
-  private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files
-      ) throws IOException {
-    return checkFiles(fs, topdir, files, false);    
-  }
-
-  private static boolean checkFiles(FileSystem fs, String topdir, MyFile[] files,
-      boolean existingOnly) throws IOException {
-    Path root = new Path(topdir);
-    
-    for (int idx = 0; idx < files.length; idx++) {
-      Path fPath = new Path(root, files[idx].getName());
-      try {
-        fs.getFileStatus(fPath);
-        FSDataInputStream in = fs.open(fPath);
-        byte[] toRead = new byte[files[idx].getSize()];
-        byte[] toCompare = new byte[files[idx].getSize()];
-        Random rb = new Random(files[idx].getSeed());
-        rb.nextBytes(toCompare);
-        assertEquals("Cannot read file.", toRead.length, in.read(toRead));
-        in.close();
-        for (int i = 0; i < toRead.length; i++) {
-          if (toRead[i] != toCompare[i]) {
-            return false;
-          }
-        }
-        toRead = null;
-        toCompare = null;
-      }
-      catch(FileNotFoundException fnfe) {
-        if (!existingOnly) {
-          throw fnfe;
-        }
-      }
-    }
-    
-    return true;
-  }
-
-  private static void updateFiles(FileSystem fs, String topdir, MyFile[] files,
-        int nupdate) throws IOException {
-    assert nupdate <= NFILES;
-
-    Path root = new Path(topdir);
-
-    for (int idx = 0; idx < nupdate; ++idx) {
-      Path fPath = new Path(root, files[idx].getName());
-      // overwrite file
-      assertTrue(fPath.toString() + " does not exist", fs.exists(fPath));
-      FSDataOutputStream out = fs.create(fPath);
-      files[idx].reset();
-      byte[] toWrite = new byte[files[idx].getSize()];
-      Random rb = new Random(files[idx].getSeed());
-      rb.nextBytes(toWrite);
-      out.write(toWrite);
-      out.close();
-    }
-  }
-
-  private static FileStatus[] getFileStatus(FileSystem fs,
-      String topdir, MyFile[] files) throws IOException {
-    return getFileStatus(fs, topdir, files, false);
-  }
-  private static FileStatus[] getFileStatus(FileSystem fs,
-      String topdir, MyFile[] files, boolean existingOnly) throws IOException {
-    Path root = new Path(topdir);
-    List<FileStatus> statuses = new ArrayList<FileStatus>();
-    for (int idx = 0; idx < NFILES; ++idx) {
-      try {
-        statuses.add(fs.getFileStatus(new Path(root, files[idx].getName())));
-      } catch(FileNotFoundException fnfe) {
-        if (!existingOnly) {
-          throw fnfe;
-        }
-      }
-    }
-    return statuses.toArray(new FileStatus[statuses.size()]);
-  }
-
-  private static boolean checkUpdate(FileSystem fs, FileStatus[] old,
-      String topdir, MyFile[] upd, final int nupdate) throws IOException {
-    Path root = new Path(topdir);
-
-    // overwrote updated files
-    for (int idx = 0; idx < nupdate; ++idx) {
-      final FileStatus stat =
-        fs.getFileStatus(new Path(root, upd[idx].getName()));
-      if (stat.getModificationTime() <= old[idx].getModificationTime()) {
-        return false;
-      }
-    }
-    // did not overwrite files not updated
-    for (int idx = nupdate; idx < NFILES; ++idx) {
-      final FileStatus stat =
-        fs.getFileStatus(new Path(root, upd[idx].getName()));
-      if (stat.getModificationTime() != old[idx].getModificationTime()) {
-        return false;
-      }
-    }
-    return true;
-  }
-
-  /** delete directory and everything underneath it.*/
-  private static void deldir(FileSystem fs, String topdir) throws IOException {
-    fs.delete(new Path(topdir), true);
-  }
-  
-  /** copy files from local file system to local file system */
-  public void testCopyFromLocalToLocal() throws Exception {
-    Configuration conf = new Configuration();
-    FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-    MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
-    ToolRunner.run(new DistCp(new Configuration()),
-                           new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
-    assertTrue("Source and destination directories do not match.",
-               checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
-    deldir(localfs, TEST_ROOT_DIR+"/destdat");
-    deldir(localfs, TEST_ROOT_DIR+"/srcdat");
-  }
-  
-  /** copy files from dfs file system to dfs file system */
-  public void testCopyFromDfsToDfs() throws Exception {
-    String namenode = null;
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
-      final FileSystem hdfs = cluster.getFileSystem();
-      namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
-        assertTrue("Log directory does not exist.",
-                   fs.exists(new Path(namenode+"/logs")));
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/srcdat");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-  
-  /** copy files from local file system to dfs file system */
-  public void testCopyFromLocalToDfs() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 1, true, null);
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = hdfs.getUri().toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(LOCAL_FS, TEST_ROOT_DIR+"/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-log",
-                                         namenode+"/logs",
-                                         "file:///"+TEST_ROOT_DIR+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(cluster.getFileSystem(), "/destdat", files));
-        assertTrue("Log directory does not exist.",
-                    hdfs.exists(new Path(namenode+"/logs")));
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/logs");
-        deldir(FileSystem.get(LOCAL_FS, conf), TEST_ROOT_DIR+"/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  /** copy files from dfs file system to local file system */
-  public void testCopyFromDfsToLocal() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      final FileSystem localfs = FileSystem.get(LOCAL_FS, conf);
-      cluster = new MiniDFSCluster(conf, 1, true, null);
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = FileSystem.getDefaultUri(conf).toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-log",
-                                         "/logs",
-                                         namenode+"/srcdat",
-                                         "file:///"+TEST_ROOT_DIR+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(localfs, TEST_ROOT_DIR+"/destdat", files));
-        assertTrue("Log directory does not exist.",
-                    hdfs.exists(new Path("/logs")));
-        deldir(localfs, TEST_ROOT_DIR+"/destdat");
-        deldir(hdfs, "/logs");
-        deldir(hdfs, "/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  public void testCopyDfsToDfsUpdateOverwrite() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      Configuration conf = new Configuration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
-      final FileSystem hdfs = cluster.getFileSystem();
-      final String namenode = hdfs.getUri().toString();
-      if (namenode.startsWith("hdfs://")) {
-        MyFile[] files = createFiles(URI.create(namenode), "/srcdat");
-        ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        FileSystem fs = FileSystem.get(URI.create(namenode+"/logs"), conf);
-        assertTrue("Log directory does not exist.",
-                    fs.exists(new Path(namenode+"/logs")));
-
-        FileStatus[] dchkpoint = getFileStatus(hdfs, "/destdat", files);
-        final int nupdate = NFILES>>2;
-        updateFiles(cluster.getFileSystem(), "/srcdat", files, nupdate);
-        deldir(hdfs, "/logs");
-
-        ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
-                                         "-update",
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        assertTrue("Update failed to replicate all changes in src",
-                 checkUpdate(hdfs, dchkpoint, "/destdat", files, nupdate));
-
-        deldir(hdfs, "/logs");
-        ToolRunner.run(new DistCp(conf), new String[] {
-                                         "-p",
-                                         "-overwrite",
-                                         "-log",
-                                         namenode+"/logs",
-                                         namenode+"/srcdat",
-                                         namenode+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(hdfs, "/destdat", files));
-        assertTrue("-overwrite didn't.",
-                 checkUpdate(hdfs, dchkpoint, "/destdat", files, NFILES));
-
-        deldir(hdfs, "/destdat");
-        deldir(hdfs, "/srcdat");
-        deldir(hdfs, "/logs");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  public void testCopyDuplication() throws Exception {
-    final FileSystem localfs = FileSystem.get(LOCAL_FS, new Configuration());
-    try {    
-      MyFile[] files = createFiles(localfs, TEST_ROOT_DIR+"/srcdat");
-      ToolRunner.run(new DistCp(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/src2/srcdat"});
-      assertTrue("Source and destination directories do not match.",
-                 checkFiles(localfs, TEST_ROOT_DIR+"/src2/srcdat", files));
-  
-      assertEquals(DistCp.DuplicationException.ERROR_CODE,
-          ToolRunner.run(new DistCp(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/src2/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/destdat",}));
-    }
-    finally {
-      deldir(localfs, TEST_ROOT_DIR+"/destdat");
-      deldir(localfs, TEST_ROOT_DIR+"/srcdat");
-      deldir(localfs, TEST_ROOT_DIR+"/src2");
-    }
-  }
-
-  public void testCopySingleFile() throws Exception {
-    FileSystem fs = FileSystem.get(LOCAL_FS, new Configuration());
-    Path root = new Path(TEST_ROOT_DIR+"/srcdat");
-    try {    
-      MyFile[] files = {createFile(root, fs)};
-      //copy a dir with a single file
-      ToolRunner.run(new DistCp(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat",
-                        "file:///"+TEST_ROOT_DIR+"/destdat"});
-      assertTrue("Source and destination directories do not match.",
-                 checkFiles(fs, TEST_ROOT_DIR+"/destdat", files));
-      
-      //copy a single file
-      String fname = files[0].getName();
-      Path p = new Path(root, fname);
-      FileSystem.LOG.info("fname=" + fname + ", exists? " + fs.exists(p));
-      ToolRunner.run(new DistCp(new Configuration()),
-          new String[] {"file:///"+TEST_ROOT_DIR+"/srcdat/"+fname,
-                        "file:///"+TEST_ROOT_DIR+"/dest2/"+fname});
-      assertTrue("Source and destination directories do not match.",
-          checkFiles(fs, TEST_ROOT_DIR+"/dest2", files));     
-      //copy single file to existing dir
-      deldir(fs, TEST_ROOT_DIR+"/dest2");
-      fs.mkdirs(new Path(TEST_ROOT_DIR+"/dest2"));
-      MyFile[] files2 = {createFile(root, fs, 0)};
-      String sname = files2[0].getName();
-      ToolRunner.run(new DistCp(new Configuration()),
-          new String[] {"-update",
-                        "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
-                        "file:///"+TEST_ROOT_DIR+"/dest2/"});
-      assertTrue("Source and destination directories do not match.",
-          checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));     
-      updateFiles(fs, TEST_ROOT_DIR+"/srcdat", files2, 1);
-      //copy single file to existing dir w/ dst name conflict
-      ToolRunner.run(new DistCp(new Configuration()),
-          new String[] {"-update",
-                        "file:///"+TEST_ROOT_DIR+"/srcdat/"+sname,
-                        "file:///"+TEST_ROOT_DIR+"/dest2/"});
-      assertTrue("Source and destination directories do not match.",
-          checkFiles(fs, TEST_ROOT_DIR+"/dest2", files2));     
-    }
-    finally {
-      deldir(fs, TEST_ROOT_DIR+"/destdat");
-      deldir(fs, TEST_ROOT_DIR+"/dest2");
-      deldir(fs, TEST_ROOT_DIR+"/srcdat");
-    }
-  }
-
-  public void testPreserveOption() throws Exception {
-    Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
-      String nnUri = FileSystem.getDefaultUri(conf).toString();
-      FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-
-      {//test preserving user
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        for(int i = 0; i < srcstat.length; i++) {
-          fs.setOwner(srcstat[i].getPath(), "u" + i, null);
-        }
-        ToolRunner.run(new DistCp(conf),
-            new String[]{"-pu", nnUri+"/srcdat", nnUri+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-        
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("i=" + i, "u" + i, dststat[i].getOwner());
-        }
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-
-      {//test preserving group
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        for(int i = 0; i < srcstat.length; i++) {
-          fs.setOwner(srcstat[i].getPath(), null, "g" + i);
-        }
-        ToolRunner.run(new DistCp(conf),
-            new String[]{"-pg", nnUri+"/srcdat", nnUri+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-        
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("i=" + i, "g" + i, dststat[i].getGroup());
-        }
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-
-      {//test preserving mode
-        MyFile[] files = createFiles(URI.create(nnUri), "/srcdat");
-        FileStatus[] srcstat = getFileStatus(fs, "/srcdat", files);
-        FsPermission[] permissions = new FsPermission[srcstat.length];
-        for(int i = 0; i < srcstat.length; i++) {
-          permissions[i] = new FsPermission((short)(i & 0666));
-          fs.setPermission(srcstat[i].getPath(), permissions[i]);
-        }
-
-        ToolRunner.run(new DistCp(conf),
-            new String[]{"-pp", nnUri+"/srcdat", nnUri+"/destdat"});
-        assertTrue("Source and destination directories do not match.",
-                   checkFiles(fs, "/destdat", files));
-  
-        FileStatus[] dststat = getFileStatus(fs, "/destdat", files);
-        for(int i = 0; i < dststat.length; i++) {
-          assertEquals("i=" + i, permissions[i], dststat[i].getPermission());
-        }
-        deldir(fs, "/destdat");
-        deldir(fs, "/srcdat");
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  public void testMapCount() throws Exception {
-    String namenode = null;
-    MiniDFSCluster dfs = null;
-    MiniMRCluster mr = null;
-    try {
-      Configuration conf = new Configuration();
-      dfs = new MiniDFSCluster(conf, 3, true, null);
-      FileSystem fs = dfs.getFileSystem();
-      final FsShell shell = new FsShell(conf);
-      namenode = fs.getUri().toString();
-      mr = new MiniMRCluster(3, namenode, 1);
-      MyFile[] files = createFiles(fs.getUri(), "/srcdat");
-      long totsize = 0;
-      for (MyFile f : files) {
-        totsize += f.getSize();
-      }
-      Configuration job = mr.createJobConf();
-      job.setLong("distcp.bytes.per.map", totsize / 3);
-      ToolRunner.run(new DistCp(job),
-          new String[] {"-m", "100",
-                        "-log",
-                        namenode+"/logs",
-                        namenode+"/srcdat",
-                        namenode+"/destdat"});
-      assertTrue("Source and destination directories do not match.",
-                 checkFiles(fs, "/destdat", files));
-
-      String logdir = namenode + "/logs";
-      System.out.println(execCmd(shell, "-lsr", logdir));
-      FileStatus[] logs = fs.listStatus(new Path(logdir));
-      // in the rare case where the splits are exact, logs.length can be 4 instead of 5
-      assertTrue("Unexpected map count, logs.length=" + logs.length,
-          logs.length == 5 || logs.length == 4);
-
-      deldir(fs, "/destdat");
-      deldir(fs, "/logs");
-      ToolRunner.run(new DistCp(job),
-          new String[] {"-m", "1",
-                        "-log",
-                        namenode+"/logs",
-                        namenode+"/srcdat",
-                        namenode+"/destdat"});
-
-      System.out.println(execCmd(shell, "-lsr", logdir));
-      logs = fs.listStatus(new Path(namenode+"/logs"));
-      assertTrue("Unexpected map count, logs.length=" + logs.length,
-          logs.length == 2);
-    } finally {
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown(); }
-    }
-  }
-
-  public void testLimits() throws Exception {
-    Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
-      final String nnUri = FileSystem.getDefaultUri(conf).toString();
-      final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-      final DistCp distcp = new DistCp(conf);
-      final FsShell shell = new FsShell(conf);  
-
-      final String srcrootdir =  "/src_root";
-      final Path srcrootpath = new Path(srcrootdir); 
-      final String dstrootdir =  "/dst_root";
-      final Path dstrootpath = new Path(dstrootdir); 
-
-      {//test -filelimit
-        MyFile[] files = createFiles(URI.create(nnUri), srcrootdir);
-        int filelimit = files.length / 2;
-        System.out.println("filelimit=" + filelimit);
-
-        ToolRunner.run(distcp,
-            new String[]{"-filelimit", ""+filelimit, nnUri+srcrootdir, nnUri+dstrootdir});
-        String results = execCmd(shell, "-lsr", dstrootdir);
-        results = removePrefix(results, dstrootdir);
-        System.out.println("results=" +  results);
-
-        FileStatus[] dststat = getFileStatus(fs, dstrootdir, files, true);
-        assertEquals(filelimit, dststat.length);
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-
-      {//test -sizelimit
-        createFiles(URI.create(nnUri), srcrootdir);
-        long sizelimit = fs.getContentSummary(srcrootpath).getLength()/2;
-        System.out.println("sizelimit=" + sizelimit);
-
-        ToolRunner.run(distcp,
-            new String[]{"-sizelimit", ""+sizelimit, nnUri+srcrootdir, nnUri+dstrootdir});
-        
-        ContentSummary summary = fs.getContentSummary(dstrootpath);
-        System.out.println("summary=" + summary);
-        assertTrue(summary.getLength() <= sizelimit);
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-
-      {//test update
-        final MyFile[] srcs = createFiles(URI.create(nnUri), srcrootdir);
-        final long totalsize = fs.getContentSummary(srcrootpath).getLength();
-        System.out.println("src.length=" + srcs.length);
-        System.out.println("totalsize =" + totalsize);
-        fs.mkdirs(dstrootpath);
-        final int parts = RAN.nextInt(NFILES/3 - 1) + 2;
-        final int filelimit = srcs.length/parts;
-        final long sizelimit = totalsize/parts;
-        System.out.println("filelimit=" + filelimit);
-        System.out.println("sizelimit=" + sizelimit);
-        System.out.println("parts    =" + parts);
-        final String[] args = {"-filelimit", ""+filelimit, "-sizelimit", ""+sizelimit,
-            "-update", nnUri+srcrootdir, nnUri+dstrootdir};
-
-        int dstfilecount = 0;
-        long dstsize = 0;
-        for(int i = 0; i <= parts; i++) {
-          ToolRunner.run(distcp, args);
-        
-          FileStatus[] dststat = getFileStatus(fs, dstrootdir, srcs, true);
-          System.out.println(i + ") dststat.length=" + dststat.length);
-          assertTrue(dststat.length - dstfilecount <= filelimit);
-          ContentSummary summary = fs.getContentSummary(dstrootpath);
-          System.out.println(i + ") summary.getLength()=" + summary.getLength());
-          assertTrue(summary.getLength() - dstsize <= sizelimit);
-          assertTrue(checkFiles(fs, dstrootdir, srcs, true));
-          dstfilecount = dststat.length;
-          dstsize = summary.getLength();
-        }
-
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  static final long now = System.currentTimeMillis();
-
-  static UnixUserGroupInformation createUGI(String name, boolean issuper) {
-    String username = name + now;
-    String group = issuper? "supergroup": username;
-    return UnixUserGroupInformation.createImmutable(
-        new String[]{username, group});
-  }
-
-  static Path createHomeDirectory(FileSystem fs, UserGroupInformation ugi
-      ) throws IOException {
-    final Path home = new Path("/user/" + ugi.getUserName());
-    fs.mkdirs(home);
-    fs.setOwner(home, ugi.getUserName(), ugi.getGroupNames()[0]);
-    fs.setPermission(home, new FsPermission((short)0700));
-    return home;
-  }
-
-  public void testHftpAccessControl() throws Exception {
-    MiniDFSCluster cluster = null;
-    try {
-      final UnixUserGroupInformation DFS_UGI = createUGI("dfs", true); 
-      final UnixUserGroupInformation USER_UGI = createUGI("user", false); 
-
-      //start cluster by DFS_UGI
-      final Configuration dfsConf = new Configuration();
-      UnixUserGroupInformation.saveToConf(dfsConf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, DFS_UGI);
-      cluster = new MiniDFSCluster(dfsConf, 2, true, null);
-      cluster.waitActive();
-
-      final String httpAdd = dfsConf.get("dfs.http.address");
-      final URI nnURI = FileSystem.getDefaultUri(dfsConf);
-      final String nnUri = nnURI.toString();
-      final Path home = createHomeDirectory(FileSystem.get(nnURI, dfsConf), USER_UGI);
-      
-      //now, login as USER_UGI
-      final Configuration userConf = new Configuration();
-      UnixUserGroupInformation.saveToConf(userConf,
-          UnixUserGroupInformation.UGI_PROPERTY_NAME, USER_UGI);
-      final FileSystem fs = FileSystem.get(nnURI, userConf);
-
-      final Path srcrootpath = new Path(home, "src_root"); 
-      final String srcrootdir =  srcrootpath.toString();
-      final Path dstrootpath = new Path(home, "dst_root"); 
-      final String dstrootdir =  dstrootpath.toString();
-      final DistCp distcp = new DistCp(userConf);
-
-      FileSystem.mkdirs(fs, srcrootpath, new FsPermission((short)0700));
-      final String[] args = {"hftp://"+httpAdd+srcrootdir, nnUri+dstrootdir};
-
-      { //copy with permission 000, should fail
-        fs.setPermission(srcrootpath, new FsPermission((short)0));
-        assertEquals(-3, ToolRunner.run(distcp, args));
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  /** test -delete */
-  public void testDelete() throws Exception {
-    final Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
-      final URI nnURI = FileSystem.getDefaultUri(conf);
-      final String nnUri = nnURI.toString();
-      final FileSystem fs = FileSystem.get(URI.create(nnUri), conf);
-
-      final DistCp distcp = new DistCp(conf);
-      final FsShell shell = new FsShell(conf);  
-
-      final String srcrootdir = "/src_root";
-      final String dstrootdir = "/dst_root";
-
-      {
-        //create source files
-        createFiles(nnURI, srcrootdir);
-        String srcresults = execCmd(shell, "-lsr", srcrootdir);
-        srcresults = removePrefix(srcresults, srcrootdir);
-        System.out.println("srcresults=" +  srcresults);
-
-        //create some files in dst
-        createFiles(nnURI, dstrootdir);
-        System.out.println("dstrootdir=" +  dstrootdir);
-        shell.run(new String[]{"-lsr", dstrootdir});
-
-        //run distcp
-        ToolRunner.run(distcp,
-            new String[]{"-delete", "-update", "-log", "/log",
-                         nnUri+srcrootdir, nnUri+dstrootdir});
-
-        //make sure src and dst contains the same files
-        String dstresults = execCmd(shell, "-lsr", dstrootdir);
-        dstresults = removePrefix(dstresults, dstrootdir);
-        System.out.println("first dstresults=" +  dstresults);
-        assertEquals(srcresults, dstresults);
-
-        //create additional files in dst
-        create(fs, new Path(dstrootdir, "foo"));
-        create(fs, new Path(dstrootdir, "foobar"));
-
-        //run distcp again
-        ToolRunner.run(distcp,
-            new String[]{"-delete", "-update", "-log", "/log2",
-                         nnUri+srcrootdir, nnUri+dstrootdir});
-        
-        //make sure src and dst contains the same files
-        dstresults = execCmd(shell, "-lsr", dstrootdir);
-        dstresults = removePrefix(dstresults, dstrootdir);
-        System.out.println("second dstresults=" +  dstresults);
-        assertEquals(srcresults, dstresults);
-
-        //cleanup
-        deldir(fs, dstrootdir);
-        deldir(fs, srcrootdir);
-      }
-    } finally {
-      if (cluster != null) { cluster.shutdown(); }
-    }
-  }
-
-  static void create(FileSystem fs, Path f) throws IOException {
-    FSDataOutputStream out = fs.create(f);
-    try {
-      byte[] b = new byte[1024 + RAN.nextInt(1024)];
-      RAN.nextBytes(b);
-      out.write(b);
-    } finally {
-      if (out != null) out.close();
-    }
-  }
-  
-  static String execCmd(FsShell shell, String... args) throws Exception {
-    ByteArrayOutputStream baout = new ByteArrayOutputStream();
-    PrintStream out = new PrintStream(baout, true);
-    PrintStream old = System.out;
-    System.setOut(out);
-    shell.run(args);
-    out.close();
-    System.setOut(old);
-    return baout.toString();
-  }
-  
-  private static String removePrefix(String lines, String prefix) {
-    final int prefixlen = prefix.length();
-    final StringTokenizer t = new StringTokenizer(lines, "\n");
-    final StringBuffer results = new StringBuffer(); 
-    for(; t.hasMoreTokens(); ) {
-      String s = t.nextToken();
-      results.append(s.substring(s.indexOf(prefix) + prefixlen) + "\n");
-    }
-    return results.toString();
-  }
-}
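
Every test in the deleted TestCopyFiles suite drives DistCp the same way: construct the tool with a Configuration and hand the remaining command-line arguments to ToolRunner. Below is a minimal standalone sketch of that invocation pattern, assuming the org.apache.hadoop.tools.DistCp class of this branch; the source and destination paths are hypothetical and any two FileSystem URIs would work:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.tools.DistCp;
    import org.apache.hadoop.util.ToolRunner;

    public class DistCpInvocationSketch {
      public static void main(String[] args) throws Exception {
        // Hypothetical paths; replace with any two FileSystem URIs.
        String src = "file:///tmp/srcdat";
        String dst = "file:///tmp/destdat";
        // ToolRunner first parses generic options (-D, -fs, ...) and then
        // passes the remaining arguments to DistCp.run().
        int rc = ToolRunner.run(new DistCp(new Configuration()),
                                new String[] {"-update", src, dst});
        System.exit(rc);  // non-zero signals a failed copy
      }
    }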

+ 0 - 445
src/test/hdfs-with-mr/org/apache/hadoop/fs/TestDFSIO.java

@@ -1,445 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.BufferedReader;
-import java.io.DataInputStream;
-import java.io.File;
-import java.io.FileOutputStream;
-import java.io.IOException;
-import java.io.InputStreamReader;
-import java.io.OutputStream;
-import java.io.PrintStream;
-import java.util.Date;
-import java.util.StringTokenizer;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * Distributed I/O benchmark.
- * <p>
- * This test writes into or reads from a specified number of files.
- * File size is specified as a parameter to the test. 
- * Each file is accessed in a separate map task.
- * <p>
- * The reducer collects the following statistics:
- * <ul>
- * <li>number of tasks completed</li>
- * <li>number of bytes written/read</li>
- * <li>execution time</li>
- * <li>I/O rate</li>
- * <li>I/O rate squared</li>
- * </ul>
- *    
- * Finally, the following information is appended to a local file
- * <ul>
- * <li>read or write test</li>
- * <li>date and time the test finished</li>   
- * <li>number of files</li>
- * <li>total number of bytes processed</li>
- * <li>throughput in mb/sec (total number of bytes / sum of processing times)</li>
- * <li>average I/O rate in mb/sec per file</li>
- * <li>standard deviation of I/O rate</li>
- * </ul>
- */
-public class TestDFSIO extends TestCase {
-  // Constants
-  private static final Log LOG = LogFactory.getLog(TestDFSIO.class);
-  private static final int TEST_TYPE_READ = 0;
-  private static final int TEST_TYPE_WRITE = 1;
-  private static final int TEST_TYPE_CLEANUP = 2;
-  private static final int DEFAULT_BUFFER_SIZE = 1000000;
-  private static final String BASE_FILE_NAME = "test_io_";
-  private static final String DEFAULT_RES_FILE_NAME = "TestDFSIO_results.log";
-  
-  private static Configuration fsConfig = new Configuration();
-  private static final long MEGA = 0x100000;
-  private static String TEST_ROOT_DIR = System.getProperty("test.build.data","/benchmarks/TestDFSIO");
-  private static Path CONTROL_DIR = new Path(TEST_ROOT_DIR, "io_control");
-  private static Path WRITE_DIR = new Path(TEST_ROOT_DIR, "io_write");
-  private static Path READ_DIR = new Path(TEST_ROOT_DIR, "io_read");
-  private static Path DATA_DIR = new Path(TEST_ROOT_DIR, "io_data");
-
-  /**
-   * Run the test with default parameters.
-   * 
-   * @throws Exception
-   */
-  public void testIOs() throws Exception {
-    testIOs(10, 10);
-  }
-
-  /**
-   * Run the test with the specified parameters.
-   * 
-   * @param fileSize file size
-   * @param nrFiles number of files
-   * @throws IOException
-   */
-  public static void testIOs(int fileSize, int nrFiles)
-    throws IOException {
-
-    FileSystem fs = FileSystem.get(fsConfig);
-
-    createControlFile(fs, fileSize, nrFiles);
-    writeTest(fs);
-    readTest(fs);
-    cleanup(fs);
-  }
-
-  private static void createControlFile(
-                                        FileSystem fs,
-                                        int fileSize, // in MB 
-                                        int nrFiles
-                                        ) throws IOException {
-    LOG.info("creating control file: "+fileSize+" mega bytes, "+nrFiles+" files");
-
-    fs.delete(CONTROL_DIR, true);
-
-    for(int i=0; i < nrFiles; i++) {
-      String name = getFileName(i);
-      Path controlFile = new Path(CONTROL_DIR, "in_file_" + name);
-      SequenceFile.Writer writer = null;
-      try {
-        writer = SequenceFile.createWriter(fs, fsConfig, controlFile,
-                                           Text.class, LongWritable.class,
-                                           CompressionType.NONE);
-        writer.append(new Text(name), new LongWritable(fileSize));
-      } catch(Exception e) {
-        throw new IOException(e.getLocalizedMessage());
-      } finally {
-        if (writer != null)
-          writer.close();
-        writer = null;
-      }
-    }
-    LOG.info("created control files for: "+nrFiles+" files");
-  }
-
-  private static String getFileName(int fIdx) {
-    return BASE_FILE_NAME + Integer.toString(fIdx);
-  }
-  
-  /**
-   * Write/Read mapper base class.
-   * <p>
-   * Collects the following statistics per task:
-   * <ul>
-   * <li>number of tasks completed</li>
-   * <li>number of bytes written/read</li>
-   * <li>execution time</li>
-   * <li>I/O rate</li>
-   * <li>I/O rate squared</li>
-   * </ul>
-   */
-  private abstract static class IOStatMapper extends IOMapperBase {
-    IOStatMapper() { 
-      super(fsConfig);
-    }
-    
-    void collectStats(OutputCollector<Text, Text> output, 
-                      String name,
-                      long execTime, 
-                      Object objSize) throws IOException {
-      long totalSize = ((Long)objSize).longValue();
-      float ioRateMbSec = (float)totalSize * 1000 / (execTime * MEGA);
-      LOG.info("Number of bytes processed = " + totalSize);
-      LOG.info("Exec time = " + execTime);
-      LOG.info("IO rate = " + ioRateMbSec);
-      
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "tasks"),
-          new Text(String.valueOf(1)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "size"),
-          new Text(String.valueOf(totalSize)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_LONG + "time"),
-          new Text(String.valueOf(execTime)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "rate"),
-          new Text(String.valueOf(ioRateMbSec*1000)));
-      output.collect(new Text(AccumulatingReducer.VALUE_TYPE_FLOAT + "sqrate"),
-          new Text(String.valueOf(ioRateMbSec*ioRateMbSec*1000)));
-    }
-  }
-
-  /**
-   * Write mapper class.
-   */
-  public static class WriteMapper extends IOStatMapper {
-
-    public WriteMapper() { 
-      super(); 
-      for(int i=0; i < bufferSize; i++)
-        buffer[i] = (byte)('0' + i % 50);
-    }
-
-    public Object doIO(Reporter reporter, 
-                       String name, 
-                       long totalSize 
-                       ) throws IOException {
-      // create the output file
-      totalSize *= MEGA;
-      OutputStream out = fs.create(new Path(DATA_DIR, name), true, bufferSize);
-      
-      try {
-        // write to the file
-        long nrRemaining;
-        for (nrRemaining = totalSize; nrRemaining > 0; nrRemaining -= bufferSize) {
-          int curSize = (bufferSize < nrRemaining) ? bufferSize : (int)nrRemaining; 
-          out.write(buffer, 0, curSize);
-          reporter.setStatus("writing " + name + "@" + 
-                             (totalSize - nrRemaining) + "/" + totalSize 
-                             + " ::host = " + hostName);
-        }
-      } finally {
-        out.close();
-      }
-      return new Long(totalSize);
-    }
-  }
-
-  private static void writeTest(FileSystem fs)
-    throws IOException {
-
-    fs.delete(DATA_DIR, true);
-    fs.delete(WRITE_DIR, true);
-    
-    runIOTest(WriteMapper.class, WRITE_DIR);
-  }
-  
-  private static void runIOTest( Class<? extends Mapper> mapperClass, 
-                                 Path outputDir
-                                 ) throws IOException {
-    JobConf job = new JobConf(fsConfig, TestDFSIO.class);
-
-    FileInputFormat.setInputPaths(job, CONTROL_DIR);
-    job.setInputFormat(SequenceFileInputFormat.class);
-
-    job.setMapperClass(mapperClass);
-    job.setReducerClass(AccumulatingReducer.class);
-
-    FileOutputFormat.setOutputPath(job, outputDir);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Text.class);
-    job.setNumReduceTasks(1);
-    JobClient.runJob(job);
-  }
-
-  /**
-   * Read mapper class.
-   */
-  public static class ReadMapper extends IOStatMapper {
-
-    public ReadMapper() { 
-      super(); 
-    }
-
-    public Object doIO(Reporter reporter, 
-                       String name, 
-                       long totalSize 
-                       ) throws IOException {
-      totalSize *= MEGA;
-      // open file
-      DataInputStream in = fs.open(new Path(DATA_DIR, name));
-      try {
-        long actualSize = 0;
-        for(int curSize = bufferSize; curSize == bufferSize;) {
-          curSize = in.read(buffer, 0, bufferSize);
-          if (curSize < 0)
-            break;  // end of file; read() returned -1
-          actualSize += curSize;
-          reporter.setStatus("reading " + name + "@" + 
-                             actualSize + "/" + totalSize 
-                             + " ::host = " + hostName);
-        }
-      } finally {
-        in.close();
-      }
-      return new Long(actualSize);
-    }
-  }
-
-  private static void readTest(FileSystem fs) throws IOException {
-    fs.delete(READ_DIR, true);
-    runIOTest(ReadMapper.class, READ_DIR);
-  }
-
-  private static void sequentialTest(
-                                     FileSystem fs, 
-                                     int testType, 
-                                     int fileSize, 
-                                     int nrFiles
-                                     ) throws Exception {
-    IOStatMapper ioer = null;
-    if (testType == TEST_TYPE_READ)
-      ioer = new ReadMapper();
-    else if (testType == TEST_TYPE_WRITE)
-      ioer = new WriteMapper();
-    else
-      return;
-    for(int i=0; i < nrFiles; i++)
-      ioer.doIO(Reporter.NULL,
-                BASE_FILE_NAME+Integer.toString(i), 
-                MEGA*fileSize);
-  }
-
-  public static void main(String[] args) {
-    int testType = TEST_TYPE_READ;
-    int bufferSize = DEFAULT_BUFFER_SIZE;
-    int fileSize = 1;
-    int nrFiles = 1;
-    String resFileName = DEFAULT_RES_FILE_NAME;
-    boolean isSequential = false;
-    
-    String className = TestDFSIO.class.getSimpleName();
-    String version = className + ".0.0.4";
-    String usage = "Usage: " + className + " -read | -write | -clean [-nrFiles N] [-fileSize MB] [-resFile resultFileName] [-bufferSize Bytes] ";
-    
-    System.out.println(version);
-    if (args.length == 0) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    for (int i = 0; i < args.length; i++) {       // parse command line
-      if (args[i].startsWith("-read")) {
-        testType = TEST_TYPE_READ;
-      } else if (args[i].equals("-write")) {
-        testType = TEST_TYPE_WRITE;
-      } else if (args[i].equals("-clean")) {
-        testType = TEST_TYPE_CLEANUP;
-      } else if (args[i].startsWith("-seq")) {
-        isSequential = true;
-      } else if (args[i].equals("-nrFiles")) {
-        nrFiles = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-fileSize")) {
-        fileSize = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-bufferSize")) {
-        bufferSize = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-resFile")) {
-        resFileName = args[++i];
-      }
-    }
-
-    LOG.info("nrFiles = " + nrFiles);
-    LOG.info("fileSize (MB) = " + fileSize);
-    LOG.info("bufferSize = " + bufferSize);
-  
-    try {
-      fsConfig.setInt("test.io.file.buffer.size", bufferSize);
-      FileSystem fs = FileSystem.get(fsConfig);
-
-      if (isSequential) {
-        long tStart = System.currentTimeMillis();
-        sequentialTest(fs, testType, fileSize, nrFiles);
-        long execTime = System.currentTimeMillis() - tStart;
-        String resultLine = "Seq Test exec time sec: " + (float)execTime / 1000;
-        LOG.info(resultLine);
-        return;
-      }
-      if (testType == TEST_TYPE_CLEANUP) {
-        cleanup(fs);
-        return;
-      }
-      createControlFile(fs, fileSize, nrFiles);
-      long tStart = System.currentTimeMillis();
-      if (testType == TEST_TYPE_WRITE)
-        writeTest(fs);
-      if (testType == TEST_TYPE_READ)
-        readTest(fs);
-      long execTime = System.currentTimeMillis() - tStart;
-    
-      analyzeResult(fs, testType, execTime, resFileName);
-    } catch(Exception e) {
-      System.err.print(StringUtils.stringifyException(e));
-      System.exit(-1);
-    }
-  }
-  
-  private static void analyzeResult( FileSystem fs, 
-                                     int testType,
-                                     long execTime,
-                                     String resFileName
-                                     ) throws IOException {
-    Path reduceFile;
-    if (testType == TEST_TYPE_WRITE)
-      reduceFile = new Path(WRITE_DIR, "part-00000");
-    else
-      reduceFile = new Path(READ_DIR, "part-00000");
-    DataInputStream in = new DataInputStream(fs.open(reduceFile));
-    BufferedReader lines = new BufferedReader(new InputStreamReader(in));
-    long tasks = 0;
-    long size = 0;
-    long time = 0;
-    float rate = 0;
-    float sqrate = 0;
-    String line;
-    while((line = lines.readLine()) != null) {
-      StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%");
-      String attr = tokens.nextToken(); 
-      if (attr.endsWith(":tasks"))
-        tasks = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith(":size"))
-        size = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith(":time"))
-        time = Long.parseLong(tokens.nextToken());
-      else if (attr.endsWith(":rate"))
-        rate = Float.parseFloat(tokens.nextToken());
-      else if (attr.endsWith(":sqrate"))
-        sqrate = Float.parseFloat(tokens.nextToken());
-    }
-    
-    double avgRate = rate / 1000 / tasks;  // "rate" was scaled by 1000 in collectStats
-    double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - avgRate*avgRate));
-    String resultLines[] = {
-      "----- TestDFSIO ----- : " + ((testType == TEST_TYPE_WRITE) ? "write" :
-                                    (testType == TEST_TYPE_READ) ? "read" : 
-                                    "unknown"),
-      "           Date & time: " + new Date(System.currentTimeMillis()),
-      "       Number of files: " + tasks,
-      "Total MBytes processed: " + size/MEGA,
-      "     Throughput mb/sec: " + size * 1000.0 / (time * MEGA),
-      "Average IO rate mb/sec: " + med,
-      " IO rate std deviation: " + stdDev,
-      "    Test exec time sec: " + (float)execTime / 1000,
-      "" };
-
-    PrintStream res = new PrintStream(
-        new FileOutputStream(new File(resFileName), true));  // append mode
-    try {
-      for(int i = 0; i < resultLines.length; i++) {
-        LOG.info(resultLines[i]);
-        res.println(resultLines[i]);
-      }
-    } finally {
-      res.close();
-    }
-  }
-
-  private static void cleanup(FileSystem fs) throws IOException {
-    LOG.info("Cleaning up test files");
-    fs.delete(new Path(TEST_ROOT_DIR), true);
-  }
-}
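
The statistics step in analyzeResult above is a mean and a population standard deviation recovered from running sums: collectStats emits each task's I/O rate r and r squared (both scaled by 1000), the reducer adds them up, and the analyzer computes avg = rate/1000/tasks and stdDev = sqrt(|sqrate/1000/tasks - avg^2|). A self-contained sketch of that arithmetic, with made-up sample values standing in for the reducer's output:

    // Standalone sketch of TestDFSIO.analyzeResult's statistics step.
    // "rate" and "sqrate" stand in for the reducer's accumulated sums,
    // each pre-scaled by 1000 in IOStatMapper.collectStats.
    public class DfsioStatsSketch {
      public static void main(String[] args) {
        long tasks = 4;  // hypothetical: four map tasks at 30, 20, 30, 20 mb/sec
        float rate = 1000 * (30f + 20f + 30f + 20f);
        float sqrate = 1000 * (30f*30f + 20f*20f + 30f*30f + 20f*20f);

        double avg = rate / 1000 / tasks;                                 // 25.0
        double stdDev = Math.sqrt(Math.abs(sqrate / 1000 / tasks - avg * avg));  // 5.0

        System.out.println("Average IO rate mb/sec: " + avg);
        System.out.println(" IO rate std deviation: " + stdDev);
      }
    }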

+ 0 - 629
src/test/hdfs-with-mr/org/apache/hadoop/fs/TestFileSystem.java

@@ -1,629 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.DataInputStream;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.util.Arrays;
-import java.util.Random;
-import java.util.List;
-import java.util.ArrayList;
-import java.util.Set;
-import java.util.HashSet;
-import java.util.Map;
-import java.util.HashMap;
-import java.net.InetSocketAddress;
-import java.net.URI;
-
-import junit.framework.TestCase;
-
-import org.apache.commons.logging.Log;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.fs.shell.CommandFormat;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.SequenceFile;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.mapred.lib.LongSumReducer;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-
-public class TestFileSystem extends TestCase {
-  private static final Log LOG = FileSystem.LOG;
-
-  private static Configuration conf = new Configuration();
-  private static int BUFFER_SIZE = conf.getInt("io.file.buffer.size", 4096);
-
-  private static final long MEGA = 1024 * 1024;
-  private static final int SEEKS_PER_FILE = 4;
-
-  private static String ROOT = System.getProperty("test.build.data","fs_test");
-  private static Path CONTROL_DIR = new Path(ROOT, "fs_control");
-  private static Path WRITE_DIR = new Path(ROOT, "fs_write");
-  private static Path READ_DIR = new Path(ROOT, "fs_read");
-  private static Path DATA_DIR = new Path(ROOT, "fs_data");
-
-  public void testFs() throws Exception {
-    testFs(10 * MEGA, 100, 0);
-  }
-
-  public static void testFs(long megaBytes, int numFiles, long seed)
-    throws Exception {
-
-    FileSystem fs = FileSystem.get(conf);
-
-    if (seed == 0)
-      seed = new Random().nextLong();
-
-    LOG.info("seed = "+seed);
-
-    createControlFile(fs, megaBytes, numFiles, seed);
-    writeTest(fs, false);
-    readTest(fs, false);
-    seekTest(fs, false);
-    fs.delete(CONTROL_DIR, true);
-    fs.delete(DATA_DIR, true);
-    fs.delete(WRITE_DIR, true);
-    fs.delete(READ_DIR, true);
-  }
-
-  public static void testCommandFormat() throws Exception {
-    // This should go to TestFsShell.java when it is added.
-    CommandFormat cf;
-    cf= new CommandFormat("copyToLocal", 2,2,"crc","ignoreCrc");
-    assertEquals(cf.parse(new String[] {"-get","file", "-"}, 1).get(1), "-");
-    assertEquals(cf.parse(new String[] {"-get","file","-ignoreCrc","/foo"}, 1).get(1),"/foo");
-    cf = new CommandFormat("tail", 1, 1, "f");
-    assertEquals(cf.parse(new String[] {"-tail","fileName"}, 1).get(0),"fileName");
-    assertEquals(cf.parse(new String[] {"-tail","-f","fileName"}, 1).get(0),"fileName");
-    cf = new CommandFormat("setrep", 2, 2, "R", "w");
-    assertEquals(cf.parse(new String[] {"-setrep","-R","2","/foo/bar"}, 1).get(1), "/foo/bar");
-    cf = new CommandFormat("put", 2, 10000);
-    assertEquals(cf.parse(new String[] {"-put", "-", "dest"}, 1).get(1), "dest"); 
-  }
-
-  public static void createControlFile(FileSystem fs,
-                                       long megaBytes, int numFiles,
-                                       long seed) throws Exception {
-
-    LOG.info("creating control file: "+megaBytes+" bytes, "+numFiles+" files");
-
-    Path controlFile = new Path(CONTROL_DIR, "files");
-    fs.delete(controlFile, true);
-    Random random = new Random(seed);
-
-    SequenceFile.Writer writer =
-      SequenceFile.createWriter(fs, conf, controlFile, 
-                                Text.class, LongWritable.class, CompressionType.NONE);
-
-    long totalSize = 0;
-    long maxSize = ((megaBytes / numFiles) * 2) + 1;
-    try {
-      while (totalSize < megaBytes) {
-        Text name = new Text(Long.toString(random.nextLong()));
-
-        long size = random.nextLong();
-        if (size < 0)
-          size = -size;
-        size = size % maxSize;
-
-        //LOG.info(" adding: name="+name+" size="+size);
-
-        writer.append(name, new LongWritable(size));
-
-        totalSize += size;
-      }
-    } finally {
-      writer.close();
-    }
-    LOG.info("created control file for: "+totalSize+" bytes");
-  }
-
-  public static class WriteMapper extends Configured
-      implements Mapper<Text, LongWritable, Text, LongWritable> {
-    
-    private Random random = new Random();
-    private byte[] buffer = new byte[BUFFER_SIZE];
-    private FileSystem fs;
-    private boolean fastCheck;
-
-    // a random suffix per task
-    private String suffix = "-"+random.nextLong();
-    
-    {
-      try {
-        fs = FileSystem.get(conf);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    public WriteMapper() { super(null); }
-    
-    public WriteMapper(Configuration conf) { super(conf); }
-
-    public void configure(JobConf job) {
-      setConf(job);
-      fastCheck = job.getBoolean("fs.test.fastCheck", false);
-    }
-
-    public void map(Text key, LongWritable value,
-                    OutputCollector<Text, LongWritable> collector,
-                    Reporter reporter)
-      throws IOException {
-      
-      String name = key.toString();
-      long size = value.get();
-      long seed = Long.parseLong(name);
-
-      random.setSeed(seed);
-      reporter.setStatus("creating " + name);
-
-      // write to temp file initially to permit parallel execution
-      Path tempFile = new Path(DATA_DIR, name+suffix);
-      OutputStream out = fs.create(tempFile);
-
-      long written = 0;
-      try {
-        while (written < size) {
-          if (fastCheck) {
-            Arrays.fill(buffer, (byte)random.nextInt(Byte.MAX_VALUE));
-          } else {
-            random.nextBytes(buffer);
-          }
-          long remains = size - written;
-          int length = (remains<=buffer.length) ? (int)remains : buffer.length;
-          out.write(buffer, 0, length);
-          written += length;
-          reporter.setStatus("writing "+name+"@"+written+"/"+size);
-        }
-      } finally {
-        out.close();
-      }
-      // rename to final location
-      fs.rename(tempFile, new Path(DATA_DIR, name));
-
-      collector.collect(new Text("bytes"), new LongWritable(written));
-
-      reporter.setStatus("wrote " + name);
-    }
-    
-    public void close() {
-    }
-    
-  }
-
-  public static void writeTest(FileSystem fs, boolean fastCheck)
-    throws Exception {
-
-    fs.delete(DATA_DIR, true);
-    fs.delete(WRITE_DIR, true);
-    
-    JobConf job = new JobConf(conf, TestFileSystem.class);
-    job.setBoolean("fs.test.fastCheck", fastCheck);
-
-    FileInputFormat.setInputPaths(job, CONTROL_DIR);
-    job.setInputFormat(SequenceFileInputFormat.class);
-
-    job.setMapperClass(WriteMapper.class);
-    job.setReducerClass(LongSumReducer.class);
-
-    FileOutputFormat.setOutputPath(job, WRITE_DIR);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(LongWritable.class);
-    job.setNumReduceTasks(1);
-    JobClient.runJob(job);
-  }
-
-  public static class ReadMapper extends Configured
-      implements Mapper<Text, LongWritable, Text, LongWritable> {
-    
-    private Random random = new Random();
-    private byte[] buffer = new byte[BUFFER_SIZE];
-    private byte[] check  = new byte[BUFFER_SIZE];
-    private FileSystem fs;
-    private boolean fastCheck;
-
-    {
-      try {
-        fs = FileSystem.get(conf);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    public ReadMapper() { super(null); }
-    
-    public ReadMapper(Configuration conf) { super(conf); }
-
-    public void configure(JobConf job) {
-      setConf(job);
-      fastCheck = job.getBoolean("fs.test.fastCheck", false);
-    }
-
-    public void map(Text key, LongWritable value,
-                    OutputCollector<Text, LongWritable> collector,
-                    Reporter reporter)
-      throws IOException {
-      
-      String name = key.toString();
-      long size = value.get();
-      long seed = Long.parseLong(name);
-
-      random.setSeed(seed);
-      reporter.setStatus("opening " + name);
-
-      DataInputStream in =
-        new DataInputStream(fs.open(new Path(DATA_DIR, name)));
-
-      long read = 0;
-      try {
-        while (read < size) {
-          long remains = size - read;
-          int n = (remains<=buffer.length) ? (int)remains : buffer.length;
-          in.readFully(buffer, 0, n);
-          read += n;
-          if (fastCheck) {
-            Arrays.fill(check, (byte)random.nextInt(Byte.MAX_VALUE));
-          } else {
-            random.nextBytes(check);
-          }
-          if (n != buffer.length) {
-            Arrays.fill(buffer, n, buffer.length, (byte)0);
-            Arrays.fill(check, n, check.length, (byte)0);
-          }
-          assertTrue(Arrays.equals(buffer, check));
-
-          reporter.setStatus("reading "+name+"@"+read+"/"+size);
-
-        }
-      } finally {
-        in.close();
-      }
-
-      collector.collect(new Text("bytes"), new LongWritable(read));
-
-      reporter.setStatus("read " + name);
-    }
-    
-    public void close() {
-    }
-    
-  }
-
-  public static void readTest(FileSystem fs, boolean fastCheck)
-    throws Exception {
-
-    fs.delete(READ_DIR, true);
-
-    JobConf job = new JobConf(conf, TestFileSystem.class);
-    job.setBoolean("fs.test.fastCheck", fastCheck);
-
-    FileInputFormat.setInputPaths(job, CONTROL_DIR);
-    job.setInputFormat(SequenceFileInputFormat.class);
-
-    job.setMapperClass(ReadMapper.class);
-    job.setReducerClass(LongSumReducer.class);
-
-    FileOutputFormat.setOutputPath(job, READ_DIR);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(LongWritable.class);
-    job.setNumReduceTasks(1);
-    JobClient.runJob(job);
-  }
-
-
-  public static class SeekMapper<K> extends Configured
-    implements Mapper<Text, LongWritable, K, LongWritable> {
-    
-    private Random random = new Random();
-    private byte[] check  = new byte[BUFFER_SIZE];
-    private FileSystem fs;
-    private boolean fastCheck;
-
-    {
-      try {
-        fs = FileSystem.get(conf);
-      } catch (IOException e) {
-        throw new RuntimeException(e);
-      }
-    }
-
-    public SeekMapper() { super(null); }
-    
-    public SeekMapper(Configuration conf) { super(conf); }
-
-    public void configure(JobConf job) {
-      setConf(job);
-      fastCheck = job.getBoolean("fs.test.fastCheck", false);
-    }
-
-    public void map(Text key, LongWritable value,
-                    OutputCollector<K, LongWritable> collector,
-                    Reporter reporter)
-      throws IOException {
-      String name = key.toString();
-      long size = value.get();
-      long seed = Long.parseLong(name);
-
-      if (size == 0) return;
-
-      reporter.setStatus("opening " + name);
-
-      FSDataInputStream in = fs.open(new Path(DATA_DIR, name));
-        
-      try {
-        for (int i = 0; i < SEEKS_PER_FILE; i++) {
-          // generate a random position; mask the sign bit so that
-          // Long.MIN_VALUE (where Math.abs is still negative) cannot
-          // produce a negative offset
-          long position = (random.nextLong() & Long.MAX_VALUE) % size;
-          
-          // seek file to that position
-          reporter.setStatus("seeking " + name);
-          in.seek(position);
-          byte b = in.readByte();
-          
-          // check that byte matches
-          byte checkByte = 0;
-          // advance random state to that position
-          random.setSeed(seed);
-          for (int p = 0; p <= position; p+= check.length) {
-            reporter.setStatus("generating data for " + name);
-            if (fastCheck) {
-              checkByte = (byte)random.nextInt(Byte.MAX_VALUE);
-            } else {
-              random.nextBytes(check);
-              checkByte = check[(int)(position % check.length)];
-            }
-          }
-          assertEquals(b, checkByte);
-        }
-      } finally {
-        in.close();
-      }
-    }
-    
-    public void close() {
-    }
-    
-  }
-
-  public static void seekTest(FileSystem fs, boolean fastCheck)
-    throws Exception {
-
-    fs.delete(READ_DIR, true);
-
-    JobConf job = new JobConf(conf, TestFileSystem.class);
-    job.setBoolean("fs.test.fastCheck", fastCheck);
-
-    FileInputFormat.setInputPaths(job,CONTROL_DIR);
-    job.setInputFormat(SequenceFileInputFormat.class);
-
-    job.setMapperClass(SeekMapper.class);
-    job.setReducerClass(LongSumReducer.class);
-
-    FileOutputFormat.setOutputPath(job, READ_DIR);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(LongWritable.class);
-    job.setNumReduceTasks(1);
-    JobClient.runJob(job);
-  }
-
-
-  public static void main(String[] args) throws Exception {
-    int megaBytes = 10;
-    int files = 100;
-    boolean noRead = false;
-    boolean noWrite = false;
-    boolean noSeek = false;
-    boolean fastCheck = false;
-    long seed = new Random().nextLong();
-
-    String usage = "Usage: TestFileSystem -files N -megaBytes M [-noread] [-nowrite] [-noseek] [-fastcheck]";
-    
-    if (args.length == 0) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    for (int i = 0; i < args.length; i++) {       // parse command line
-      if (args[i].equals("-files")) {
-        files = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-megaBytes")) {
-        megaBytes = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-noread")) {
-        noRead = true;
-      } else if (args[i].equals("-nowrite")) {
-        noWrite = true;
-      } else if (args[i].equals("-noseek")) {
-        noSeek = true;
-      } else if (args[i].equals("-fastcheck")) {
-        fastCheck = true;
-      }
-    }
-
-    LOG.info("seed = "+seed);
-    LOG.info("files = " + files);
-    LOG.info("megaBytes = " + megaBytes);
-  
-    FileSystem fs = FileSystem.get(conf);
-
-    if (!noWrite) {
-      createControlFile(fs, megaBytes*MEGA, files, seed);
-      writeTest(fs, fastCheck);
-    }
-    if (!noRead) {
-      readTest(fs, fastCheck);
-    }
-    if (!noSeek) {
-      seekTest(fs, fastCheck);
-    }
-  }
-
-  static Configuration createConf4Testing(String username) throws Exception {
-    Configuration conf = new Configuration();
-    UnixUserGroupInformation.saveToConf(conf,
-        UnixUserGroupInformation.UGI_PROPERTY_NAME,
-        new UnixUserGroupInformation(username, new String[]{"group"}));
-    return conf;    
-  }
-
-  public void testFsCache() throws Exception {
-    {
-      long now = System.currentTimeMillis();
-      Configuration[] conf = {new Configuration(),
-          createConf4Testing("foo" + now), createConf4Testing("bar" + now)};
-      FileSystem[] fs = new FileSystem[conf.length];
-  
-      for(int i = 0; i < conf.length; i++) {
-        fs[i] = FileSystem.get(conf[i]);
-        assertEquals(fs[i], FileSystem.get(conf[i]));
-        for(int j = 0; j < i; j++) {
-          assertFalse(fs[j] == fs[i]);
-        }
-      }
-      FileSystem.closeAll();
-    }
-    
-    {
-      try {
-        runTestCache(NameNode.DEFAULT_PORT);
-      } catch(java.net.BindException be) {
-        LOG.warn("Cannot test NameNode.DEFAULT_PORT (="
-            + NameNode.DEFAULT_PORT + ")", be);
-      }
-
-      runTestCache(0);
-    }
-  }
-  
-  static void runTestCache(int port) throws Exception {
-    Configuration conf = new Configuration();
-    MiniDFSCluster cluster = null;
-    try {
-      cluster = new MiniDFSCluster(port, conf, 2, true, true, null, null);
-      URI uri = cluster.getFileSystem().getUri();
-      LOG.info("uri=" + uri);
-
-      {
-        FileSystem fs = FileSystem.get(uri, new Configuration());
-        checkPath(cluster, fs);
-        for(int i = 0; i < 100; i++) {
-          assertTrue(fs == FileSystem.get(uri, new Configuration()));
-        }
-      }
-      
-      if (port == NameNode.DEFAULT_PORT) {
-        //test explicit default port
-        URI uri2 = new URI(uri.getScheme(), uri.getUserInfo(),
-            uri.getHost(), NameNode.DEFAULT_PORT, uri.getPath(),
-            uri.getQuery(), uri.getFragment());  
-        LOG.info("uri2=" + uri2);
-        FileSystem fs = FileSystem.get(uri2, conf);
-        checkPath(cluster, fs);
-        for(int i = 0; i < 100; i++) {
-          assertTrue(fs == FileSystem.get(uri2, new Configuration()));
-        }
-      }
-    } finally {
-      if (cluster != null) cluster.shutdown(); 
-    }
-  }
-  
-  static void checkPath(MiniDFSCluster cluster, FileSystem fileSys) throws IOException {
-    InetSocketAddress add = cluster.getNameNode().getNameNodeAddress();
-    // Test upper/lower case
-    fileSys.checkPath(new Path("hdfs://" + add.getHostName().toUpperCase() + ":" + add.getPort()));
-  }
-
-  public void testFsClose() throws Exception {
-    {
-      Configuration conf = new Configuration();
-      new Path("file:///").getFileSystem(conf);
-      UnixUserGroupInformation.login(conf, true);
-      FileSystem.closeAll();
-    }
-
-    {
-      Configuration conf = new Configuration();
-      new Path("hftp://localhost:12345/").getFileSystem(conf);
-      UnixUserGroupInformation.login(conf, true);
-      FileSystem.closeAll();
-    }
-
-    {
-      Configuration conf = new Configuration();
-      FileSystem fs = new Path("hftp://localhost:12345/").getFileSystem(conf);
-      UnixUserGroupInformation.login(fs.getConf(), true);
-      FileSystem.closeAll();
-    }
-  }
-
-
-  public void testCacheKeysAreCaseInsensitive()
-    throws Exception
-  {
-    Configuration conf = new Configuration();
-    
-    // check basic equality
-    FileSystem.Cache.Key lowercaseCachekey1 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf);
-    FileSystem.Cache.Key lowercaseCachekey2 = new FileSystem.Cache.Key(new URI("hftp://localhost:12345/"), conf);
-    assertEquals( lowercaseCachekey1, lowercaseCachekey2 );
-
-    // check case-insensitive equality
-    FileSystem.Cache.Key uppercaseCachekey = new FileSystem.Cache.Key(new URI("HFTP://Localhost:12345/"), conf);
-    assertEquals( lowercaseCachekey2, uppercaseCachekey );
-
-    // check behaviour with collections
-    List<FileSystem.Cache.Key> list = new ArrayList<FileSystem.Cache.Key>();
-    list.add(uppercaseCachekey);
-    assertTrue(list.contains(uppercaseCachekey));
-    assertTrue(list.contains(lowercaseCachekey2));
-
-    Set<FileSystem.Cache.Key> set = new HashSet<FileSystem.Cache.Key>();
-    set.add(uppercaseCachekey);
-    assertTrue(set.contains(uppercaseCachekey));
-    assertTrue(set.contains(lowercaseCachekey2));
-
-    Map<FileSystem.Cache.Key, String> map = new HashMap<FileSystem.Cache.Key, String>();
-    map.put(uppercaseCachekey, "");
-    assertTrue(map.containsKey(uppercaseCachekey));
-    assertTrue(map.containsKey(lowercaseCachekey2));    
-
-  }
-
-  public static void testFsUniqueness(long megaBytes, int numFiles, long seed)
-    throws Exception {
-
-    // multiple invocations of FileSystem.get return the same object.
-    FileSystem fs1 = FileSystem.get(conf);
-    FileSystem fs2 = FileSystem.get(conf);
-    assertTrue(fs1 == fs2);
-
-    // multiple invocations of FileSystem.newInstance return different objects
-    fs1 = FileSystem.newInstance(conf);
-    fs2 = FileSystem.newInstance(conf);
-    assertTrue(fs1 != fs2 && !fs1.equals(fs2));
-    fs1.close();
-    fs2.close();
-  }
-}
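
Taken together, the cache tests above pin down the contract: FileSystem.get returns one shared instance per cache key (scheme and authority compared case-insensitively, with the calling user folded in), while FileSystem.newInstance always returns a fresh object. A minimal sketch of both behaviors, assuming a reachable hdfs://namenode:8020 (the URI is illustrative, not part of the test):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;

    public class FsCacheSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Cached lookups: the key is case-insensitive, so both URIs
        // resolve to the same FileSystem object.
        FileSystem a = FileSystem.get(URI.create("hdfs://namenode:8020/"), conf);
        FileSystem b = FileSystem.get(URI.create("HDFS://NameNode:8020/"), conf);
        assert a == b;

        // newInstance bypasses the cache: every call builds a distinct object.
        FileSystem c = FileSystem.newInstance(conf);
        FileSystem d = FileSystem.newInstance(conf);
        assert c != d;

        c.close();
        d.close();
        FileSystem.closeAll();
      }
    }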

+ 0 - 213
src/test/hdfs-with-mr/org/apache/hadoop/fs/TestHarFileSystem.java

@@ -1,213 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.fs;
-
-import java.io.IOException;
-import java.util.Iterator;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.tools.HadoopArchives;
-import org.apache.hadoop.util.ToolRunner;
-
-/**
- * Test the har file system:
- * create a har file system,
- * run fs commands,
- * and then run a map reduce job.
- */
-public class TestHarFileSystem extends TestCase {
-  private Path inputPath;
-  private MiniDFSCluster dfscluster;
-  private MiniMRCluster mapred;
-  private FileSystem fs;
-  private Path filea, fileb, filec;
-  private Path archivePath;
-  
-  protected void setUp() throws Exception {
-    super.setUp();
-    dfscluster = new MiniDFSCluster(new JobConf(), 2, true, null);
-    fs = dfscluster.getFileSystem();
-    mapred = new MiniMRCluster(2, fs.getUri().toString(), 1);
-    inputPath = new Path(fs.getHomeDirectory(), "test"); 
-    filea = new Path(inputPath,"a");
-    fileb = new Path(inputPath,"b");
-    filec = new Path(inputPath,"c");
-    archivePath = new Path(fs.getHomeDirectory(), "tmp");
-  }
-  
-  protected void tearDown() throws Exception {
-    try {
-      if (mapred != null) {
-        mapred.shutdown();
-      }
-      if (dfscluster != null) {
-        dfscluster.shutdown();
-      }
-    } catch(Exception e) {
-      System.err.println(e);
-    }
-    super.tearDown();
-  }
-  
-  static class TextMapperReducer implements Mapper<LongWritable, Text, Text, Text>, 
-            Reducer<Text, Text, Text, Text> {
-    
-    public void configure(JobConf conf) {
-      //do nothing 
-    }
-
-    public void map(LongWritable key, Text value, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
-      output.collect(value, new Text(""));
-    }
-
-    public void close() throws IOException {
-      // do nothing
-    }
-
-    public void reduce(Text key, Iterator<Text> values, OutputCollector<Text, Text> output, Reporter reporter) throws IOException {
-      while(values.hasNext()) { 
-        values.next();
-        output.collect(key, null);
-      }
-    }
-  }
-  
-  public void testArchives() throws Exception {
-    fs.mkdirs(inputPath);
-    
-    FSDataOutputStream out = fs.create(filea); 
-    out.write("a".getBytes());
-    out.close();
-    out = fs.create(fileb);
-    out.write("b".getBytes());
-    out.close();
-    out = fs.create(filec);
-    out.write("c".getBytes());
-    out.close();
-    Configuration conf = mapred.createJobConf();
-    HadoopArchives har = new HadoopArchives(conf);
-    String[] args = new String[3];
-    //check for destination not specified
-    args[0] = "-archiveName";
-    args[1] = "foo.har";
-    args[2] = inputPath.toString();
-    int ret = ToolRunner.run(har, args);
-    assertTrue(ret != 0);
-    args = new String[4];
-    //check for wrong archiveName
-    args[0] = "-archiveName";
-    args[1] = "/d/foo.har";
-    args[2] = inputPath.toString();
-    args[3] = archivePath.toString();
-    ret = ToolRunner.run(har, args);
-    assertTrue(ret != 0);
-    // see if dest is a file
-    args[1] = "foo.har";
-    args[3] = filec.toString();
-    ret = ToolRunner.run(har, args);
-    assertTrue(ret != 0);
-    //this is a valid run
-    args[0] = "-archiveName";
-    args[1] = "foo.har";
-    args[2] = inputPath.toString();
-    args[3] = archivePath.toString();
-    ret = ToolRunner.run(har, args);
-    //check for the existence of the archive
-    assertTrue(ret == 0);
-    // try running it again. it should not
-    // override the directory
-    ret = ToolRunner.run(har, args);
-    assertTrue(ret != 0);
-    Path finalPath = new Path(archivePath, "foo.har");
-    Path fsPath = new Path(inputPath.toUri().getPath());
-    String relative = fsPath.toString().substring(1);
-    Path filePath = new Path(finalPath, relative);
-    //make it a har path 
-    Path harPath = new Path("har://" + filePath.toUri().getPath());
-    assertTrue(fs.exists(new Path(finalPath, "_index")));
-    assertTrue(fs.exists(new Path(finalPath, "_masterindex")));
-    assertTrue(!fs.exists(new Path(finalPath, "_logs")));
-    //creation tested
-    //check if the archive is the same
-    // do ls and cat on all the files
-    FsShell shell = new FsShell(conf);
-    args = new String[2];
-    args[0] = "-ls";
-    args[1] = harPath.toString();
-    ret = ToolRunner.run(shell, args);
-    // ls should work.
-    assertTrue((ret == 0));
-    //now check for contents of filea
-    // fileb and filec
-    Path harFilea = new Path(harPath, "a");
-    Path harFileb = new Path(harPath, "b");
-    Path harFilec = new Path(harPath, "c");
-    FileSystem harFs = harFilea.getFileSystem(conf);
-    FSDataInputStream fin = harFs.open(harFilea);
-    byte[] b = new byte[4];
-    int readBytes = fin.read(b);
-    assertTrue("Empty read.", readBytes > 0);
-    fin.close();
-    assertTrue("strings are equal ", (b[0] == "a".getBytes()[0]));
-    fin = harFs.open(harFileb);
-    readBytes = fin.read(b);
-    assertTrue("Empty read.", readBytes > 0);
-    fin.close();
-    assertTrue("strings are equal ", (b[0] == "b".getBytes()[0]));
-    fin = harFs.open(harFilec);
-    readBytes = fin.read(b);
-    assertTrue("Empty read.", readBytes > 0);
-    fin.close();
-    assertTrue("strings are equal ", (b[0] == "c".getBytes()[0]));
-    // ok all files match 
-    // run a map reduce job
-    Path outdir = new Path(fs.getHomeDirectory(), "mapout"); 
-    JobConf jobconf = mapred.createJobConf();
-    FileInputFormat.addInputPath(jobconf, harPath);
-    jobconf.setInputFormat(TextInputFormat.class);
-    jobconf.setOutputFormat(TextOutputFormat.class);
-    FileOutputFormat.setOutputPath(jobconf, outdir);
-    jobconf.setMapperClass(TextMapperReducer.class);
-    jobconf.setMapOutputKeyClass(Text.class);
-    jobconf.setMapOutputValueClass(Text.class);
-    jobconf.setReducerClass(TextMapperReducer.class);
-    jobconf.setNumReduceTasks(1);
-    JobClient.runJob(jobconf);
-    args[1] = outdir.toString();
-    ret = ToolRunner.run(shell, args);
-    
-    FileStatus[] status = fs.globStatus(new Path(outdir, "part*"));
-    Path reduceFile = status[0].getPath();
-    FSDataInputStream reduceIn = fs.open(reduceFile);
-    b = new byte[6];
-    readBytes = reduceIn.read(b);
-    assertTrue("Should read 6 bytes.", readBytes == 6);
-    //assuming all the 6 bytes were read.
-    Text readTxt = new Text(b);
-    assertTrue("a\nb\nc\n".equals(readTxt.toString()));
-    assertTrue("number of bytes left should be -1", reduceIn.read(b) == -1);
-    reduceIn.close();
-  }
-}
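
testArchives above exercises the full flow a user would drive: build the archive with the HadoopArchives tool, then address its contents through the har:// scheme. A condensed, hedged sketch of just the happy path (paths are illustrative; error handling and the follow-up map reduce job are omitted):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.tools.HadoopArchives;
    import org.apache.hadoop.util.ToolRunner;

    public class HarSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // equivalent of: hadoop archive -archiveName foo.har /user/me/test /user/me/tmp
        String[] harArgs = { "-archiveName", "foo.har", "/user/me/test", "/user/me/tmp" };
        int ret = ToolRunner.run(new HadoopArchives(conf), harArgs);
        assert ret == 0;   // a second run fails: an existing archive is not overwritten

        // the archived files are then readable through the har file system
        Path harFile = new Path("har:///user/me/tmp/foo.har/user/me/test/a");
        FileSystem harFs = harFile.getFileSystem(conf);
        FSDataInputStream in = harFs.open(harFile);
        System.out.println((char) in.read());   // prints 'a'
        in.close();
      }
    }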

+ 0 - 964
src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBench.java

@@ -1,964 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.Date;
-import java.io.DataInputStream;
-import java.io.FileOutputStream;
-import java.io.InputStreamReader;
-import java.io.PrintStream;
-import java.io.File;
-import java.io.BufferedReader;
-import java.util.StringTokenizer;
-import java.net.InetAddress;
-import java.text.SimpleDateFormat;
-import java.util.Iterator;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.Log;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FileSystem;
-
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.LongWritable;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.SequenceFile;
-
-import org.apache.hadoop.mapred.FileInputFormat;
-import org.apache.hadoop.mapred.FileOutputFormat;
-import org.apache.hadoop.mapred.Mapper;
-import org.apache.hadoop.mapred.SequenceFileInputFormat;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.MapReduceBase;
-import org.apache.hadoop.mapred.Reporter;
-import org.apache.hadoop.mapred.OutputCollector;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.Reducer;
-
-/**
- * This program executes a specified operation that applies load to 
- * the NameNode.
- * 
- * When run simultaneously on multiple nodes, this program functions 
- * as a stress-test and benchmark for namenode, especially when 
- * the number of bytes written to each file is small.
- * 
- * Valid operations are:
- *   create_write
- *   open_read
- *   rename
- *   delete
- * 
- * NOTE: The open_read, rename and delete operations assume that the files
- *       they operate on are already available. The create_write operation 
- *       must be run before running the other operations.
- */
-
-public class NNBench {
-  private static final Log LOG = LogFactory.getLog(
-          "org.apache.hadoop.hdfs.NNBench");
-  
-  protected static String CONTROL_DIR_NAME = "control";
-  protected static String OUTPUT_DIR_NAME = "output";
-  protected static String DATA_DIR_NAME = "data";
-  protected static final String DEFAULT_RES_FILE_NAME = "NNBench_results.log";
-  protected static final String NNBENCH_VERSION = "NameNode Benchmark 0.4";
-  
-  public static String operation = "none";
-  public static long numberOfMaps = 1l; // default is 1
-  public static long numberOfReduces = 1l; // default is 1
-  public static long startTime = 
-          System.currentTimeMillis() + (120 * 1000); // default is 'now' + 2min
-  public static long blockSize = 1l; // default is 1
-  public static int bytesToWrite = 0; // default is 0
-  public static long bytesPerChecksum = 1l; // default is 1
-  public static long numberOfFiles = 1l; // default is 1
-  public static short replicationFactorPerFile = 1; // default is 1
-  public static String baseDir = "/benchmarks/NNBench";  // default
-  public static boolean readFileAfterOpen = false; // default is to not read
-  
-  // Supported operations
-  private static final String OP_CREATE_WRITE = "create_write";
-  private static final String OP_OPEN_READ = "open_read";
-  private static final String OP_RENAME = "rename";
-  private static final String OP_DELETE = "delete";
-  
-  // To display in the format that matches the NN and DN log format
-  // Example: 2007-10-26 00:01:19,853
-  static SimpleDateFormat sdf = 
-          new SimpleDateFormat("yyyy-MM-dd' 'HH:mm:ss','S");
-
-  private static Configuration config = new Configuration();
-  
-  /**
-   * Clean up the files before a test run
-   * 
-   * @throws IOException on error
-   */
-  private static void cleanupBeforeTestrun() throws IOException {
-    FileSystem tempFS = FileSystem.get(config);
-    
-    // Delete the data directory only if it is the create/write operation
-    if (operation.equals(OP_CREATE_WRITE)) {
-      LOG.info("Deleting data directory");
-      tempFS.delete(new Path(baseDir, DATA_DIR_NAME), true);
-    }
-    tempFS.delete(new Path(baseDir, CONTROL_DIR_NAME), true);
-    tempFS.delete(new Path(baseDir, OUTPUT_DIR_NAME), true);
-  }
-  
-  /**
-   * Create control files before a test run.
-   * Number of files created is equal to the number of maps specified
-   * 
-   * @throws IOException on error
-   */
-  private static void createControlFiles() throws IOException {
-    FileSystem tempFS = FileSystem.get(config);
-    LOG.info("Creating " + numberOfMaps + " control files");
-
-    for (int i = 0; i < numberOfMaps; i++) {
-      String strFileName = "NNBench_Controlfile_" + i;
-      Path filePath = new Path(new Path(baseDir, CONTROL_DIR_NAME),
-              strFileName);
-
-      SequenceFile.Writer writer = null;
-      try {
-        writer = SequenceFile.createWriter(tempFS, config, filePath, Text.class, 
-                LongWritable.class, CompressionType.NONE);
-        writer.append(new Text(strFileName), new LongWritable(0l));
-      } catch(Exception e) {
-        throw new IOException(e.getLocalizedMessage());
-      } finally {
-        if (writer != null) {
-          writer.close();
-        }
-        writer = null;
-      }
-    }
-  }
-  /**
-   * Display version
-   */
-  private static void displayVersion() {
-    System.out.println(NNBENCH_VERSION);
-  }
-  
-  /**
-   * Display usage
-   */
-  private static void displayUsage() {
-    String usage =
-      "Usage: nnbench <options>\n" +
-      "Options:\n" +
-      "\t-operation <Available operations are " + OP_CREATE_WRITE + " " +
-      OP_OPEN_READ + " " + OP_RENAME + " " + OP_DELETE + ". " +
-      "This option is mandatory>\n" +
-      "\t * NOTE: The open_read, rename and delete operations assume " +
-      "that the files they operate on, are already available. " +
-      "The create_write operation must be run before running the " +
-      "other operations.\n" +
-      "\t-maps <number of maps. default is 1. This is not mandatory>\n" +
-      "\t-reduces <number of reduces. default is 1. This is not mandatory>\n" +
-      "\t-startTime <time to start, given in seconds from the epoch. " +
-      "Make sure this is far enough into the future, so all maps " +
-      "(operations) will start at the same time>. " +
-      "default is launch time + 2 mins. This is not mandatory \n" +
-      "\t-blockSize <Block size in bytes. default is 1. " + 
-      "This is not mandatory>\n" +
-      "\t-bytesToWrite <Bytes to write. default is 0. " + 
-      "This is not mandatory>\n" +
-      "\t-bytesPerChecksum <Bytes per checksum for the files. default is 1. " + 
-      "This is not mandatory>\n" +
-      "\t-numberOfFiles <number of files to create. default is 1. " +
-      "This is not mandatory>\n" +
-      "\t-replicationFactorPerFile <Replication factor for the files." +
-        " default is 1. This is not mandatory>\n" +
-      "\t-baseDir <base DFS path. default is /becnhmarks/NNBench. " +
-      "This is not mandatory>\n" +
-      "\t-readFileAfterOpen <true or false. if true, it reads the file and " +
-      "reports the average time to read. This is valid with the open_read " +
-      "operation. default is false. This is not mandatory>\n" +
-      "\t-help: Display the help statement\n";
-      
-    
-    System.out.println(usage);
-  }
-
-  /**
-   * check for arguments and fail if the values are not specified
-   */
-  public static void checkArgs(final int index, final int length) {
-    if (index == length) {
-      displayUsage();
-      System.exit(-1);
-    }
-  }
-  
-  /**
-   * Parse input arguments
-   * 
-   * @param args Command line inputs
-   */
-  public static void parseInputs(final String[] args) {
-    // If there are no command line arguments, exit
-    if (args.length == 0) {
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // Parse command line args
-    for (int i = 0; i < args.length; i++) {
-      if (args[i].equals("-operation")) {
-        operation = args[++i];
-      } else if (args[i].equals("-maps")) {
-        checkArgs(i + 1, args.length);
-        numberOfMaps = Long.parseLong(args[++i]);
-      } else if (args[i].equals("-reduces")) {
-        checkArgs(i + 1, args.length);
-        numberOfReduces = Long.parseLong(args[++i]);
-      } else if (args[i].equals("-startTime")) {
-        checkArgs(i + 1, args.length);
-        startTime = Long.parseLong(args[++i]) * 1000;
-      } else if (args[i].equals("-blockSize")) {
-        checkArgs(i + 1, args.length);
-        blockSize = Long.parseLong(args[++i]);
-      } else if (args[i].equals("-bytesToWrite")) {
-        checkArgs(i + 1, args.length);
-        bytesToWrite = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-bytesPerChecksum")) {
-        checkArgs(i + 1, args.length);
-        bytesPerChecksum = Long.parseLong(args[++i]);
-      } else if (args[i].equals("-numberOfFiles")) {
-        checkArgs(i + 1, args.length);
-        numberOfFiles = Long.parseLong(args[++i]);
-      } else if (args[i].equals("-replicationFactorPerFile")) {
-        checkArgs(i + 1, args.length);
-        replicationFactorPerFile = Short.parseShort(args[++i]);
-      } else if (args[i].equals("-baseDir")) {
-        checkArgs(i + 1, args.length);
-        baseDir = args[++i];
-      } else if (args[i].equals("-readFileAfterOpen")) {
-        checkArgs(i + 1, args.length);
-        readFileAfterOpen = Boolean.parseBoolean(args[++i]);
-      } else if (args[i].equals("-help")) {
-        displayUsage();
-        System.exit(-1);
-      }
-    }
-    
-    LOG.info("Test Inputs: ");
-    LOG.info("           Test Operation: " + operation);
-    LOG.info("               Start time: " + sdf.format(new Date(startTime)));
-    LOG.info("           Number of maps: " + numberOfMaps);
-    LOG.info("        Number of reduces: " + numberOfReduces);
-    LOG.info("               Block Size: " + blockSize);
-    LOG.info("           Bytes to write: " + bytesToWrite);
-    LOG.info("       Bytes per checksum: " + bytesPerChecksum);
-    LOG.info("          Number of files: " + numberOfFiles);
-    LOG.info("       Replication factor: " + replicationFactorPerFile);
-    LOG.info("                 Base dir: " + baseDir);
-    LOG.info("     Read file after open: " + readFileAfterOpen);
-    
-    // Set user-defined parameters, so the map method can access the values
-    config.set("test.nnbench.operation", operation);
-    config.setLong("test.nnbench.maps", numberOfMaps);
-    config.setLong("test.nnbench.reduces", numberOfReduces);
-    config.setLong("test.nnbench.starttime", startTime);
-    config.setLong("test.nnbench.blocksize", blockSize);
-    config.setInt("test.nnbench.bytestowrite", bytesToWrite);
-    config.setLong("test.nnbench.bytesperchecksum", bytesPerChecksum);
-    config.setLong("test.nnbench.numberoffiles", numberOfFiles);
-    config.setInt("test.nnbench.replicationfactor", 
-            (int) replicationFactorPerFile);
-    config.set("test.nnbench.basedir", baseDir);
-    config.setBoolean("test.nnbench.readFileAfterOpen", readFileAfterOpen);
-
-    config.set("test.nnbench.datadir.name", DATA_DIR_NAME);
-    config.set("test.nnbench.outputdir.name", OUTPUT_DIR_NAME);
-    config.set("test.nnbench.controldir.name", CONTROL_DIR_NAME);
-  }
-  
-  /**
-   * Analyze the results
-   * 
-   * @throws IOException on error
-   */
-  private static void analyzeResults() throws IOException {
-    final FileSystem fs = FileSystem.get(config);
-    Path reduceFile = new Path(new Path(baseDir, OUTPUT_DIR_NAME),
-            "part-00000");
-
-    DataInputStream in;
-    in = new DataInputStream(fs.open(reduceFile));
-
-    BufferedReader lines;
-    lines = new BufferedReader(new InputStreamReader(in));
-
-    long totalTimeAL1 = 0l;
-    long totalTimeAL2 = 0l;
-    long totalTimeTPmS = 0l;
-    long lateMaps = 0l;
-    long numOfExceptions = 0l;
-    long successfulFileOps = 0l;
-    
-    long mapStartTimeTPmS = 0l;
-    long mapEndTimeTPmS = 0l;
-    
-    String resultTPSLine1 = null;
-    String resultTPSLine2 = null;
-    String resultALLine1 = null;
-    String resultALLine2 = null;
-    
-    String line;
-    while((line = lines.readLine()) != null) {
-      StringTokenizer tokens = new StringTokenizer(line, " \t\n\r\f%;");
-      String attr = tokens.nextToken();
-      if (attr.endsWith(":totalTimeAL1")) {
-        totalTimeAL1 = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":totalTimeAL2")) {
-        totalTimeAL2 = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":totalTimeTPmS")) {
-        totalTimeTPmS = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":latemaps")) {
-        lateMaps = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":numOfExceptions")) {
-        numOfExceptions = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":successfulFileOps")) {
-        successfulFileOps = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":mapStartTimeTPmS")) {
-        mapStartTimeTPmS = Long.parseLong(tokens.nextToken());
-      } else if (attr.endsWith(":mapEndTimeTPmS")) {
-        mapEndTimeTPmS = Long.parseLong(tokens.nextToken());
-      }
-    }
-    
-    // Average latency is the average time to perform 'n' number of
-    // operations, n being the number of files
-    double avgLatency1 = (double) totalTimeAL1 / (double) successfulFileOps;
-    double avgLatency2 = (double) totalTimeAL2 / (double) successfulFileOps;
-    
-    // The time it takes for the longest running map is measured. Using that,
-    // cluster transactions per second is calculated. It includes time to 
-    // retry any of the failed operations
-    double longestMapTimeTPmS = (double) (mapEndTimeTPmS - mapStartTimeTPmS);
-    double totalTimeTPS = (longestMapTimeTPmS == 0) ?
-            (1000 * successfulFileOps) :
-            (double) (1000 * successfulFileOps) / (double) longestMapTimeTPmS;
-            
-    // The time it takes to perform 'n' operations is calculated (in ms),
-    // n being the number of files. Using that time, the average execution 
-    // time is calculated. It includes time to retry any of the
-    // failed operations
-    double AverageExecutionTime = (totalTimeTPmS == 0) ?
-        (double) successfulFileOps : 
-        (double) (totalTimeTPmS / successfulFileOps);
-            
-    if (operation.equals(OP_CREATE_WRITE)) {
-      // For create/write/close, it is treated as two transactions,
-      // since a file create from a client perspective involves create and close
-      resultTPSLine1 = "               TPS: Create/Write/Close: " + 
-        (int) (totalTimeTPS * 2);
-      resultTPSLine2 = "Avg exec time (ms): Create/Write/Close: " + 
-        (double) AverageExecutionTime;
-      resultALLine1 = "            Avg Lat (ms): Create/Write: " + avgLatency1;
-      resultALLine2 = "                   Avg Lat (ms): Close: " + avgLatency2;
-    } else if (operation.equals(OP_OPEN_READ)) {
-      resultTPSLine1 = "                        TPS: Open/Read: " + 
-        (int) totalTimeTPS;
-      resultTPSLine2 = "         Avg Exec time (ms): Open/Read: " + 
-        (double) AverageExecutionTime;
-      resultALLine1 = "                    Avg Lat (ms): Open: " + avgLatency1;
-      if (readFileAfterOpen) {
-        resultALLine2 = "                  Avg Lat (ms): Read: " + avgLatency2;
-      }
-    } else if (operation.equals(OP_RENAME)) {
-      resultTPSLine1 = "                           TPS: Rename: " + 
-        (int) totalTimeTPS;
-      resultTPSLine2 = "            Avg Exec time (ms): Rename: " + 
-        (double) AverageExecutionTime;
-      resultALLine1 = "                  Avg Lat (ms): Rename: " + avgLatency1;
-    } else if (operation.equals(OP_DELETE)) {
-      resultTPSLine1 = "                           TPS: Delete: " + 
-        (int) totalTimeTPS;
-      resultTPSLine2 = "            Avg Exec time (ms): Delete: " + 
-        (double) AverageExecutionTime;
-      resultALLine1 = "                  Avg Lat (ms): Delete: " + avgLatency1;
-    }
-    
-    String resultLines[] = {
-    "-------------- NNBench -------------- : ",
-    "                               Version: " + NNBENCH_VERSION,
-    "                           Date & time: " + sdf.format(new Date(
-            System.currentTimeMillis())),
-    "",
-    "                        Test Operation: " + operation,
-    "                            Start time: " + 
-      sdf.format(new Date(startTime)),
-    "                           Maps to run: " + numberOfMaps,
-    "                        Reduces to run: " + numberOfReduces,
-    "                    Block Size (bytes): " + blockSize,
-    "                        Bytes to write: " + bytesToWrite,
-    "                    Bytes per checksum: " + bytesPerChecksum,
-    "                       Number of files: " + numberOfFiles,
-    "                    Replication factor: " + replicationFactorPerFile,
-    "            Successful file operations: " + successfulFileOps,
-    "",
-    "        # maps that missed the barrier: " + lateMaps,
-    "                          # exceptions: " + numOfExceptions,
-    "",
-    resultTPSLine1,
-    resultTPSLine2,
-    resultALLine1,
-    resultALLine2,
-    "",
-    "                 RAW DATA: AL Total #1: " + totalTimeAL1,
-    "                 RAW DATA: AL Total #2: " + totalTimeAL2,
-    "              RAW DATA: TPS Total (ms): " + totalTimeTPmS,
-    "       RAW DATA: Longest Map Time (ms): " + longestMapTimeTPmS,
-    "                   RAW DATA: Late maps: " + lateMaps,
-    "             RAW DATA: # of exceptions: " + numOfExceptions,
-    "" };
-
-    PrintStream res = new PrintStream(new FileOutputStream(
-            new File(DEFAULT_RES_FILE_NAME), true));
-    
-    // Write to a file and also dump to log
-    for(int i = 0; i < resultLines.length; i++) {
-      LOG.info(resultLines[i]);
-      res.println(resultLines[i]);
-    }
-  }
-  
-  /**
-   * Run the test
-   * 
-   * @throws IOException on error
-   */
-  public static void runTests() throws IOException {
-    config.setLong("io.bytes.per.checksum", bytesPerChecksum);
-    
-    JobConf job = new JobConf(config, NNBench.class);
-
-    job.setJobName("NNBench-" + operation);
-    FileInputFormat.setInputPaths(job, new Path(baseDir, CONTROL_DIR_NAME));
-    job.setInputFormat(SequenceFileInputFormat.class);
-    
-    // Explicitly set number of max map attempts to 1.
-    job.setMaxMapAttempts(1);
-    
-    // Explicitly turn off speculative execution
-    job.setSpeculativeExecution(false);
-
-    job.setMapperClass(NNBenchMapper.class);
-    job.setReducerClass(NNBenchReducer.class);
-
-    FileOutputFormat.setOutputPath(job, new Path(baseDir, OUTPUT_DIR_NAME));
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Text.class);
-    job.setNumReduceTasks((int) numberOfReduces);
-    JobClient.runJob(job);
-  }
-  
-  /**
-   * Validate the inputs
-   */
-  public static void validateInputs() {
-    // If it is not one of the four operations, then fail
-    if (!operation.equals(OP_CREATE_WRITE) &&
-            !operation.equals(OP_OPEN_READ) &&
-            !operation.equals(OP_RENAME) &&
-            !operation.equals(OP_DELETE)) {
-      System.err.println("Error: Unknown operation: " + operation);
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If number of maps is a negative number, then fail
-    // Hadoop allows the number of maps to be 0
-    if (numberOfMaps < 0) {
-      System.err.println("Error: Number of maps must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If number of reduces is a negative number or 0, then fail
-    if (numberOfReduces <= 0) {
-      System.err.println("Error: Number of reduces must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-
-    // If blocksize is a negative number or 0, then fail
-    if (blockSize <= 0) {
-      System.err.println("Error: Block size must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If bytes to write is a negative number, then fail
-    if (bytesToWrite < 0) {
-      System.err.println("Error: Bytes to write must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If bytes per checksum is a negative number, then fail
-    if (bytesPerChecksum < 0) {
-      System.err.println("Error: Bytes per checksum must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If number of files is a negative number, then fail
-    if (numberOfFiles < 0) {
-      System.err.println("Error: Number of files must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If replication factor is a negative number, then fail
-    if (replicationFactorPerFile < 0) {
-      System.err.println("Error: Replication factor must be a positive number");
-      displayUsage();
-      System.exit(-1);
-    }
-    
-    // If block size is not a multiple of bytesperchecksum, fail
-    if (blockSize % bytesPerChecksum != 0) {
-      System.err.println("Error: Block Size in bytes must be a multiple of " +
-              "bytes per checksum: ");
-      displayUsage();
-      System.exit(-1);
-    }
-  }
-  /**
-  * Main method for running the NNBench benchmarks
-  *
-  * @throws IOException indicates a problem with test startup
-  */
-  public static void main(String[] args) throws IOException {
-    // Display the application version string
-    displayVersion();
-
-    // Parse the inputs
-    parseInputs(args);
-    
-    // Validate inputs
-    validateInputs();
-    
-    // Clean up files before the test run
-    cleanupBeforeTestrun();
-    
-    // Create control files before test run
-    createControlFiles();
-
-    // Run the tests as a map reduce job
-    runTests();
-    
-    // Analyze results
-    analyzeResults();
-  }
-
-  
-  /**
-   * Mapper class
-   */
-  static class NNBenchMapper extends Configured 
-          implements Mapper<Text, LongWritable, Text, Text> {
-    FileSystem filesystem = null;
-    private String hostName = null;
-
-    long numberOfFiles = 1l;
-    long blkSize = 1l;
-    short replFactor = 1;
-    int bytesToWrite = 0;
-    String baseDir = null;
-    String dataDirName = null;
-    String op = null;
-    boolean readFile = false;
-    final int MAX_OPERATION_EXCEPTIONS = 1000;
-    
-    // Data to collect from the operation
-    int numOfExceptions = 0;
-    long startTimeAL = 0l;
-    long totalTimeAL1 = 0l;
-    long totalTimeAL2 = 0l;
-    long successfulFileOps = 0l;
-    
-    /**
-     * Constructor
-     */
-    public NNBenchMapper() {
-    }
-    
-    /**
-     * Mapper base implementation
-     */
-    public void configure(JobConf conf) {
-      setConf(conf);
-      
-      try {
-        filesystem = FileSystem.get(conf);
-      } catch(Exception e) {
-        throw new RuntimeException("Cannot get file system.", e);
-      }
-      
-      try {
-        hostName = InetAddress.getLocalHost().getHostName();
-      } catch(Exception e) {
-        throw new RuntimeException("Error getting hostname", e);
-      }
-    }
-    
-    /**
-     * Mapper base implementation
-     */
-    public void close() throws IOException {
-    }
-    
-    /**
-    * Returns when the current number of seconds from the epoch equals
-    * the command line argument given by <code>-startTime</code>.
-    * This allows multiple instances of this program, running on clock
-    * synchronized nodes, to start at roughly the same time.
-    */
-    private boolean barrier() {
-      long startTime = getConf().getLong("test.nnbench.starttime", 0l);
-      long currentTime = System.currentTimeMillis();
-      long sleepTime = startTime - currentTime;
-      boolean retVal = false;
-      
-      // If the sleep time is greater than 0, then sleep and return
-      if (sleepTime > 0) {
-        LOG.info("Waiting in barrier for: " + sleepTime + " ms");
-      
-        try {
-          Thread.sleep(sleepTime);
-          retVal = true;
-        } catch (Exception e) {
-          retVal = false;
-        }
-      }
-      
-      return retVal;
-    }
-    
-    /**
-     * Map method
-     */ 
-    public void map(Text key, 
-            LongWritable value,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) throws IOException {
-      Configuration conf = filesystem.getConf();
-      
-      numberOfFiles = conf.getLong("test.nnbench.numberoffiles", 1l);
-      blkSize = conf.getLong("test.nnbench.blocksize", 1l);
-      replFactor = (short) (conf.getInt("test.nnbench.replicationfactor", 1));
-      bytesToWrite = conf.getInt("test.nnbench.bytestowrite", 0);
-      baseDir = conf.get("test.nnbench.basedir");
-      dataDirName = conf.get("test.nnbench.datadir.name");
-      op = conf.get("test.nnbench.operation");
-      readFile = conf.getBoolean("test.nnbench.readFileAfterOpen", false);
-      
-      long totalTimeTPmS = 0l;
-      long startTimeTPmS = 0l;
-      long endTimeTPms = 0l;
-      
-      numOfExceptions = 0;
-      startTimeAL = 0l;
-      totalTimeAL1 = 0l;
-      totalTimeAL2 = 0l;
-      successfulFileOps = 0l;
-      
-      if (barrier()) {
-        if (op.equals(OP_CREATE_WRITE)) {
-          startTimeTPmS = System.currentTimeMillis();
-          doCreateWriteOp("file_" + hostName + "_", output, reporter);
-        } else if (op.equals(OP_OPEN_READ)) {
-          startTimeTPmS = System.currentTimeMillis();
-          doOpenReadOp("file_" + hostName + "_", output, reporter);
-        } else if (op.equals(OP_RENAME)) {
-          startTimeTPmS = System.currentTimeMillis();
-          doRenameOp("file_" + hostName + "_", output, reporter);
-        } else if (op.equals(OP_DELETE)) {
-          startTimeTPmS = System.currentTimeMillis();
-          doDeleteOp("file_" + hostName + "_", output, reporter);
-        }
-        
-        endTimeTPms = System.currentTimeMillis();
-        totalTimeTPmS = endTimeTPms - startTimeTPmS;
-      } else {
-        output.collect(new Text("l:latemaps"), new Text("1"));
-      }
-      
-      // collect after the map end time is measured
-      output.collect(new Text("l:totalTimeAL1"), 
-          new Text(String.valueOf(totalTimeAL1)));
-      output.collect(new Text("l:totalTimeAL2"), 
-          new Text(String.valueOf(totalTimeAL2)));
-      output.collect(new Text("l:numOfExceptions"), 
-          new Text(String.valueOf(numOfExceptions)));
-      output.collect(new Text("l:successfulFileOps"), 
-          new Text(String.valueOf(successfulFileOps)));
-      output.collect(new Text("l:totalTimeTPmS"), 
-              new Text(String.valueOf(totalTimeTPmS)));
-      output.collect(new Text("min:mapStartTimeTPmS"), 
-          new Text(String.valueOf(startTimeTPmS)));
-      output.collect(new Text("max:mapEndTimeTPmS"), 
-          new Text(String.valueOf(endTimeTPms)));
-    }
-    
-    /**
-     * Create and Write operation.
-     */
-    private void doCreateWriteOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
-      FSDataOutputStream out = null;
-      byte[] buffer = new byte[bytesToWrite];
-      
-      for (long l = 0l; l < numberOfFiles; l++) {
-        Path filePath = new Path(new Path(baseDir, dataDirName), 
-                name + "_" + l);
-
-        boolean successfulOp = false;
-        while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
-          try {
-            // Set up timer for measuring AL (transaction #1)
-            startTimeAL = System.currentTimeMillis();
-            // Create the file
-            // Use a buffer size of 512
-            out = filesystem.create(filePath, 
-                    true, 
-                    512, 
-                    replFactor, 
-                    blkSize);
-            out.write(buffer);
-            totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
-
-            // Close the file / file output stream
-            // Set up timers for measuring AL (transaction #2)
-            startTimeAL = System.currentTimeMillis();
-            out.close();
-            
-            totalTimeAL2 += (System.currentTimeMillis() - startTimeAL);
-            successfulOp = true;
-            successfulFileOps ++;
-
-            reporter.setStatus("Finish "+ l + " files");
-          } catch (IOException e) {
-            LOG.info("Exception recorded in op: " +
-                    "Create/Write/Close");
- 
-            numOfExceptions++;
-          }
-        }
-      }
-    }
-    
-    /**
-     * Open operation
-     */
-    private void doOpenReadOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
-      FSDataInputStream input = null;
-      byte[] buffer = new byte[bytesToWrite];
-      
-      for (long l = 0l; l < numberOfFiles; l++) {
-        Path filePath = new Path(new Path(baseDir, dataDirName), 
-                name + "_" + l);
-
-        boolean successfulOp = false;
-        while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
-          try {
-            // Set up timer for measuring AL
-            startTimeAL = System.currentTimeMillis();
-            input = filesystem.open(filePath);
-            totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
-            
-            // If the file needs to be read (specified at command line)
-            if (readFile) {
-              startTimeAL = System.currentTimeMillis();
-              input.readFully(buffer);
-
-              totalTimeAL2 += (System.currentTimeMillis() - startTimeAL);
-            }
-            input.close();
-            successfulOp = true;
-            successfulFileOps ++;
-
-            reporter.setStatus("Finish "+ l + " files");
-          } catch (IOException e) {
-            LOG.info("Exception recorded in op: OpenRead " + e);
-            numOfExceptions++;
-          }
-        }
-      }
-    }
-    
-    /**
-     * Rename operation
-     */
-    private void doRenameOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
-      for (long l = 0l; l < numberOfFiles; l++) {
-        Path filePath = new Path(new Path(baseDir, dataDirName), 
-                name + "_" + l);
-        Path filePathR = new Path(new Path(baseDir, dataDirName), 
-                name + "_r_" + l);
-
-        boolean successfulOp = false;
-        while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
-          try {
-            // Set up timer for measuring AL
-            startTimeAL = System.currentTimeMillis();
-            filesystem.rename(filePath, filePathR);
-            totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
-            
-            successfulOp = true;
-            successfulFileOps ++;
-
-            reporter.setStatus("Finish "+ l + " files");
-          } catch (IOException e) {
-            LOG.info("Exception recorded in op: Rename");
-
-            numOfExceptions++;
-          }
-        }
-      }
-    }
-    
-    /**
-     * Delete operation
-     */
-    private void doDeleteOp(String name,
-            OutputCollector<Text, Text> output,
-            Reporter reporter) {
-      for (long l = 0l; l < numberOfFiles; l++) {
-        Path filePath = new Path(new Path(baseDir, dataDirName), 
-                name + "_" + l);
-        
-        boolean successfulOp = false;
-        while (! successfulOp && numOfExceptions < MAX_OPERATION_EXCEPTIONS) {
-          try {
-            // Set up timer for measuring AL
-            startTimeAL = System.currentTimeMillis();
-            filesystem.delete(filePath, true);
-            totalTimeAL1 += (System.currentTimeMillis() - startTimeAL);
-            
-            successfulOp = true;
-            successfulFileOps ++;
-
-            reporter.setStatus("Finish "+ l + " files");
-          } catch (IOException e) {
-            LOG.info("Exception in recorded op: Delete");
-
-            numOfExceptions++;
-          }
-        }
-      }
-    }
-  }
-  
-  /**
-   * Reducer class
-   */
-  static class NNBenchReducer extends MapReduceBase
-      implements Reducer<Text, Text, Text, Text> {
-
-    protected String hostName;
-
-    public NNBenchReducer () {
-      LOG.info("Starting NNBenchReducer !!!");
-      try {
-        hostName = java.net.InetAddress.getLocalHost().getHostName();
-      } catch(Exception e) {
-        hostName = "localhost";
-      }
-      LOG.info("Starting NNBenchReducer on " + hostName);
-    }
-
-    /**
-     * Reduce method
-     */
-    public void reduce(Text key, 
-                       Iterator<Text> values,
-                       OutputCollector<Text, Text> output, 
-                       Reporter reporter
-                       ) throws IOException {
-      String field = key.toString();
-      
-      reporter.setStatus("starting " + field + " ::host = " + hostName);
-      
-      // sum long values
-      if (field.startsWith("l:")) {
-        long lSum = 0;
-        while (values.hasNext()) {
-          lSum += Long.parseLong(values.next().toString());
-        }
-        output.collect(key, new Text(String.valueOf(lSum)));
-      }
-      
-      if (field.startsWith("min:")) {
-        long minVal = -1;
-        while (values.hasNext()) {
-          long value = Long.parseLong(values.next().toString());
-          
-          if (minVal == -1) {
-            minVal = value;
-          } else {
-            if (value != 0 && value < minVal) {
-              minVal = value;
-            }
-          }
-        }
-        output.collect(key, new Text(String.valueOf(minVal)));
-      }
-      
-      if (field.startsWith("max:")) {
-        long maxVal = -1;
-        while (values.hasNext()) {
-          long value = Long.parseLong(values.next().toString());
-          
-          if (maxVal == -1) {
-            maxVal = value;
-          } else {
-            if (value > maxVal) {
-              maxVal = value;
-            }
-          }
-        }
-        output.collect(key, new Text(String.valueOf(maxVal)));
-      }
-      
-      reporter.setStatus("finished " + field + " ::host = " + hostName);
-    }
-  }
-}
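
Since open_read, rename and delete all reuse the files laid down by create_write, a full benchmark is simply four invocations of NNBench in that order. A hedged driver sketch for the first phase (map, reduce and file counts are illustrative; the flags mirror parseInputs above):

    import org.apache.hadoop.hdfs.NNBench;

    public class NNBenchDriver {
      public static void main(String[] unused) throws Exception {
        // create_write must run first; later runs swap in open_read, rename, delete.
        String[] args = {
            "-operation", "create_write",
            "-maps", "12",
            "-reduces", "6",
            "-numberOfFiles", "1000",
            "-replicationFactorPerFile", "3",
            "-baseDir", "/benchmarks/NNBench"
        };
        NNBench.main(args);   // results are appended to NNBench_results.log
      }
    }

With no -startTime given, every map waits in barrier() until launch time plus two minutes, so maps scheduled on different nodes begin loading the NameNode at roughly the same moment.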

+ 0 - 344
src/test/hdfs-with-mr/org/apache/hadoop/hdfs/NNBenchWithoutMR.java

@@ -1,344 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.util.Date;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.util.StringUtils;
-
-/**
- * This program executes a specified operation that applies load to 
- * the NameNode. Possible operations include create/writing files,
- * opening/reading files, renaming files, and deleting files.
- * 
- * When run simultaneously on multiple nodes, this program functions 
- * as a stress-test and benchmark for namenode, especially when 
- * the number of bytes written to each file is small.
- * 
- * This version does not use the map reduce framework
- * 
- */
-public class NNBenchWithoutMR {
-  
-  private static final Log LOG = LogFactory.getLog(
-                                            "org.apache.hadoop.hdfs.NNBench");
-  
-  // variables initialized from command line arguments
-  private static long startTime = 0;
-  private static int numFiles = 0;
-  private static long bytesPerBlock = 1;
-  private static long blocksPerFile = 0;
-  private static long bytesPerFile = 1;
-  private static Path baseDir = null;
-    
-  // variables initialized in main()
-  private static FileSystem fileSys = null;
-  private static Path taskDir = null;
-  private static String uniqueId = null;
-  private static byte[] buffer;
-  private static long maxExceptionsPerFile = 200;
-    
-  /**
-   * Returns when the current number of seconds from the epoch equals
-   * the command line argument given by <code>-startTime</code>.
-   * This allows multiple instances of this program, running on clock
-   * synchronized nodes, to start at roughly the same time.
-   */
-  static void barrier() {
-    long sleepTime;
-    while ((sleepTime = startTime - System.currentTimeMillis()) > 0) {
-      try {
-        Thread.sleep(sleepTime);
-      } catch (InterruptedException ex) {
-      }
-    }
-  }
-    
-  static private void handleException(String operation, Throwable e, 
-                                      int singleFileExceptions) {
-    LOG.warn("Exception while " + operation + ": " +
-             StringUtils.stringifyException(e));
-    if (singleFileExceptions >= maxExceptionsPerFile) {
-      throw new RuntimeException(singleFileExceptions + 
-        " exceptions for a single file exceeds threshold. Aborting");
-    }
-  }
-  
-  /**
-   * Create and write to a given number of files.  Repeat each remote
- * operation until it succeeds (does not throw an exception).
-   *
-   * @return the number of exceptions caught
-   */
-  static int createWrite() {
-    int totalExceptions = 0;
-    FSDataOutputStream out = null;
-    boolean success = false;
-    for (int index = 0; index < numFiles; index++) {
-      int singleFileExceptions = 0;
-      do { // create file until it succeeds or max exceptions reached
-        try {
-          out = fileSys.create(new Path(taskDir, "" + index),
-                               false, 512, (short) 1, bytesPerBlock);
-          success = true;
-        } catch (IOException ioe) { 
-          success=false; 
-          totalExceptions++;
-          handleException("creating file #" + index, ioe, ++singleFileExceptions);
-        }
-      } while (!success);
-      long toBeWritten = bytesPerFile;
-      while (toBeWritten > 0) {
-        int nbytes = (int) Math.min(buffer.length, toBeWritten);
-        toBeWritten -= nbytes;
-        try { // only try once
-          out.write(buffer, 0, nbytes);
-        } catch (IOException ioe) {
-          totalExceptions++;
-          handleException("writing to file #" + index, ioe, ++singleFileExceptions);
-        }
-      }
-      do { // close file until it succeeds
-        try {
-          out.close();
-          success = true;
-        } catch (IOException ioe) {
-          success=false; 
-          totalExceptions++;
-          handleException("closing file #" + index, ioe, ++singleFileExceptions);
-        }
-      } while (!success);
-    }
-    return totalExceptions;
-  }
-    
-  /**
-   * Open and read a given number of files.
-   *
-   * @return the number of exceptions caught
-   */
-  static int openRead() {
-    int totalExceptions = 0;
-    FSDataInputStream in = null;
-    for (int index = 0; index < numFiles; index++) {
-      int singleFileExceptions = 0;
-      try {
-        in = fileSys.open(new Path(taskDir, "" + index), 512);
-        long toBeRead = bytesPerFile;
-        while (toBeRead > 0) {
-          int nbytes = (int) Math.min(buffer.length, toBeRead);
-          toBeRead -= nbytes;
-          try { // only try once
-            in.read(buffer, 0, nbytes);
-          } catch (IOException ioe) {
-            totalExceptions++;
-            handleException("reading from file #" + index, ioe, ++singleFileExceptions);
-          }
-        }
-        in.close();
-      } catch (IOException ioe) { 
-        totalExceptions++;
-        handleException("opening file #" + index, ioe, ++singleFileExceptions);
-      }
-    }
-    return totalExceptions;
-  }
-    
-  /**
-   * Rename a given number of files.  Repeat each remote
- * operation until it succeeds (does not throw an exception).
-   *
-   * @return the number of exceptions caught
-   */
-  static int rename() {
-    int totalExceptions = 0;
-    boolean success = false;
-    for (int index = 0; index < numFiles; index++) {
-      int singleFileExceptions = 0;
-      do { // rename file until it succeeds
-        try {
-          boolean result = fileSys.rename(new Path(taskDir, "" + index),
-                                          new Path(taskDir, "A" + index));
-          success = true;
-        } catch (IOException ioe) { 
-          success=false; 
-          totalExceptions++;
-          handleException("creating file #" + index, ioe, ++singleFileExceptions);
-       }
-      } while (!success);
-    }
-    return totalExceptions;
-  }
-    
-  /**
-   * Delete a given number of files.  Repeat each remote
- * operation until it succeeds (does not throw an exception).
-   *
-   * @return the number of exceptions caught
-   */
-  static int delete() {
-    int totalExceptions = 0;
-    boolean success = false;
-    for (int index = 0; index < numFiles; index++) {
-      int singleFileExceptions = 0;
-      do { // delete file until it succeeds
-        try {
-          boolean result = fileSys.delete(new Path(taskDir, "A" + index), true);
-          success = true;
-        } catch (IOException ioe) { 
-          success=false; 
-          totalExceptions++;
-          handleException("creating file #" + index, ioe, ++singleFileExceptions);
-        }
-      } while (!success);
-    }
-    return totalExceptions;
-  }
-    
-  /**
-   * This launches a given namenode operation (<code>-operation</code>),
-   * starting at a given time (<code>-startTime</code>).  The files used
-   * by the openRead, rename, and delete operations are the same files
-   * created by the createWrite operation.  Typically, the program
-   * would be run four times, once for each operation in this order:
-   * createWrite, openRead, rename, delete.
-   *
-   * <pre>
-   * Usage: nnbench 
-   *          -operation <one of createWrite, openRead, rename, or delete>
-   *          -baseDir <base output/input DFS path>
-   *          -startTime <time to start, given in seconds from the epoch>
-   *          -numFiles <number of files to create, read, rename, or delete>
-   *          -blocksPerFile <number of blocks to create per file>
-   *         [-bytesPerBlock <number of bytes to write to each block, default is 1>]
-   *         [-bytesPerChecksum <value for io.bytes.per.checksum>]
-   * </pre>
-   *
-   * @throws IOException indicates a problem with test startup
-   */
-  public static void main(String[] args) throws IOException {
-    String version = "NameNodeBenchmark.0.3";
-    System.out.println(version);
-    int bytesPerChecksum = -1;
-    
-    String usage =
-      "Usage: nnbench " +
-      "  -operation <one of createWrite, openRead, rename, or delete> " +
-      "  -baseDir <base output/input DFS path> " +
-      "  -startTime <time to start, given in seconds from the epoch> " +
-      "  -numFiles <number of files to create> " +
-      "  -blocksPerFile <number of blocks to create per file> " +
-      "  [-bytesPerBlock <number of bytes to write to each block, default is 1>] " +
-      "  [-bytesPerChecksum <value for io.bytes.per.checksum>]" +
-      "Note: bytesPerBlock MUST be a multiple of bytesPerChecksum";
-    
-    String operation = null;
-    for (int i = 0; i < args.length; i++) { // parse command line
-      if (args[i].equals("-baseDir")) {
-        baseDir = new Path(args[++i]);
-      } else if (args[i].equals("-numFiles")) {
-        numFiles = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-blocksPerFile")) {
-        blocksPerFile = Integer.parseInt(args[++i]);
-      } else if (args[i].equals("-bytesPerBlock")) {
-        bytesPerBlock = Long.parseLong(args[++i]);
-      } else if (args[i].equals("-bytesPerChecksum")) {
-        bytesPerChecksum = Integer.parseInt(args[++i]);        
-      } else if (args[i].equals("-startTime")) {
-        startTime = Long.parseLong(args[++i]) * 1000;
-      } else if (args[i].equals("-operation")) {
-        operation = args[++i];
-      } else {
-        System.out.println(usage);
-        System.exit(-1);
-      }
-    }
-    bytesPerFile = bytesPerBlock * blocksPerFile;
-    
-    JobConf jobConf = new JobConf(new Configuration(), NNBench.class);
-    
-    if ( bytesPerChecksum < 0 ) { // if it is not set in cmdline
-      bytesPerChecksum = jobConf.getInt("io.bytes.per.checksum", 512);
-    }
-    jobConf.set("io.bytes.per.checksum", Integer.toString(bytesPerChecksum));
-    
-    System.out.println("Inputs: ");
-    System.out.println("   operation: " + operation);
-    System.out.println("   baseDir: " + baseDir);
-    System.out.println("   startTime: " + startTime);
-    System.out.println("   numFiles: " + numFiles);
-    System.out.println("   blocksPerFile: " + blocksPerFile);
-    System.out.println("   bytesPerBlock: " + bytesPerBlock);
-    System.out.println("   bytesPerChecksum: " + bytesPerChecksum);
-    
-    if (operation == null ||  // verify args
-        baseDir == null ||
-        numFiles < 1 ||
-        blocksPerFile < 1 ||
-        bytesPerBlock < 0 ||
-        bytesPerBlock % bytesPerChecksum != 0)
-      {
-        System.err.println(usage);
-        System.exit(-1);
-      }
-    
-    fileSys = FileSystem.get(jobConf);
-    uniqueId = java.net.InetAddress.getLocalHost().getHostName();
-    taskDir = new Path(baseDir, uniqueId);
-    // initialize buffer used for writing/reading file
-    buffer = new byte[(int) Math.min(bytesPerFile, 32768L)];
-    
-    Date execTime;
-    Date endTime;
-    long duration;
-    int exceptions = 0;
-    barrier(); // wait for coordinated start time
-    execTime = new Date();
-    System.out.println("Job started: " + startTime);
-    if (operation.equals("createWrite")) {
-      if (!fileSys.mkdirs(taskDir)) {
-        throw new IOException("Mkdirs failed to create " + taskDir.toString());
-      }
-      exceptions = createWrite();
-    } else if (operation.equals("openRead")) {
-      exceptions = openRead();
-    } else if (operation.equals("rename")) {
-      exceptions = rename();
-    } else if (operation.equals("delete")) {
-      exceptions = delete();
-    } else {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    endTime = new Date();
-    System.out.println("Job ended: " + endTime);
-    duration = (endTime.getTime() - execTime.getTime()) / 1000;
-    System.out.println("The " + operation + " job took " + duration + " seconds.");
-    System.out.println("The job recorded " + exceptions + " exceptions.");
-  }
-}

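A typical benchmarking session, per the Javadoc above, runs the same command four times and varies only -operation. An illustrative sketch (the jar name, path, counts, and epoch time are assumptions, not from the source; "nnbench" is the alias registered by HdfsWithMRTestDriver later in this change):

  hadoop jar hadoop-test.jar nnbench -operation createWrite \
      -baseDir /benchmarks/NNBench -startTime 1244500000 \
      -numFiles 1000 -blocksPerFile 1
  # ...then repeat with -operation openRead, rename, and delete,
  # keeping -baseDir and -numFiles identical so the later operations
  # find the files written by createWrite.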
+ 0 - 603
src/test/hdfs-with-mr/org/apache/hadoop/io/FileBench.java

@@ -1,603 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.io;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.Date;
-import java.util.EnumSet;
-import java.util.HashMap;
-import java.util.Map;
-import java.util.Random;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.GzipCodec;
-import org.apache.hadoop.io.Text;
-import org.apache.hadoop.mapred.*;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-public class FileBench extends Configured implements Tool {
-
-  static int printUsage() {
-    ToolRunner.printGenericCommandUsage(System.out);
-    System.out.println(
-"Usage: Task list:           -[no]r -[no]w\n" +
-"       Format:              -[no]seq -[no]txt\n" +
-"       CompressionCodec:    -[no]zip -[no]pln\n" +
-"       CompressionType:     -[no]blk -[no]rec\n" +
-"       Required:            -dir <working dir>\n" +
-"All valid combinations are implicitly enabled, unless an option is enabled\n" +
-"explicitly. For example, specifying \"-zip\", excludes -pln,\n" +
-"unless they are also explicitly included, as in \"-pln -zip\"\n" +
-"Note that CompressionType params only apply to SequenceFiles\n\n" +
-"Useful options to set:\n" +
-"-D fs.default.name=\"file:///\" \\\n" +
-"-D fs.file.impl=org.apache.hadoop.fs.RawLocalFileSystem \\\n" +
-"-D filebench.file.bytes=$((10*1024*1024*1024)) \\\n" +
-"-D filebench.key.words=5 \\\n" +
-"-D filebench.val.words=20\n");
-    return -1;
-  }
-
-  static String[] keys;
-  static String[] values;
-  static StringBuilder sentence = new StringBuilder();
-
-  private static String generateSentence(Random r, int noWords) {
-    sentence.setLength(0);
-    for (int i=0; i < noWords; ++i) {
-      sentence.append(words[r.nextInt(words.length)]);
-      sentence.append(" ");
-    }
-    return sentence.toString();
-  }
-
-  // fill keys and values with ~1.5 blocks of data for the block-compressed
-  // SequenceFile case
-  private static void fillBlocks(JobConf conf) {
-    Random r = new Random();
-    long seed = conf.getLong("filebench.seed", -1);
-    if (seed > 0) {
-      r.setSeed(seed);
-    }
-
-    int keylen = conf.getInt("filebench.key.words", 5);
-    int vallen = conf.getInt("filebench.val.words", 20);
-    int acc = (3 * conf.getInt("io.seqfile.compress.blocksize", 1000000)) >> 1;
-    ArrayList<String> k = new ArrayList<String>();
-    ArrayList<String> v = new ArrayList<String>();
-    for (int i = 0; acc > 0; ++i) {
-      String s = generateSentence(r, keylen);
-      acc -= s.length();
-      k.add(s);
-      s = generateSentence(r, vallen);
-      acc -= s.length();
-      v.add(s);
-    }
-    keys = k.toArray(new String[0]);
-    values = v.toArray(new String[0]);
-  }
-
-  @SuppressWarnings("unchecked") // OutputFormat instantiation
-  static long writeBench(JobConf conf) throws IOException {
-    // 5L: the default is 5 GB; plain int arithmetic here would silently overflow
-    long filelen = conf.getLong("filebench.file.bytes", 5L * 1024 * 1024 * 1024);
-    Text key = new Text();
-    Text val = new Text();
-
-    final String fn = conf.get("test.filebench.name", "");
-    final Path outd = FileOutputFormat.getOutputPath(conf);
-    conf.set("mapred.work.output.dir", outd.toString());
-    OutputFormat outf = conf.getOutputFormat();
-    RecordWriter<Text,Text> rw =
-      outf.getRecordWriter(outd.getFileSystem(conf), conf, fn,
-                           Reporter.NULL);
-    try {
-      long acc = 0L;
-      Date start = new Date();
-      for (int i = 0; acc < filelen; ++i) {
-        i %= keys.length;
-        key.set(keys[i]);
-        val.set(values[i]);
-        rw.write(key, val);
-        acc += keys[i].length();
-        acc += values[i].length();
-      }
-      Date end = new Date();
-      return end.getTime() - start.getTime();
-    } finally {
-      rw.close(Reporter.NULL);
-    }
-  }
-
-  @SuppressWarnings("unchecked") // InputFormat instantiation
-  static long readBench(JobConf conf) throws IOException {
-    InputFormat inf = conf.getInputFormat();
-    final String fn = conf.get("test.filebench.name", "");
-    Path pin = new Path(FileInputFormat.getInputPaths(conf)[0], fn);
-    FileStatus in = pin.getFileSystem(conf).getFileStatus(pin);
-    RecordReader rr = inf.getRecordReader(new FileSplit(pin, 0, in.getLen(), 
-                                          (String[])null), conf, Reporter.NULL);
-    try {
-      Object key = rr.createKey();
-      Object val = rr.createValue();
-      Date start = new Date();
-      while (rr.next(key, val));
-      Date end = new Date();
-      return end.getTime() - start.getTime();
-    } finally {
-      rr.close();
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    int res = ToolRunner.run(new Configuration(), new FileBench(), args);
-    System.exit(res);
-  }
-
-  /**
-   * Process params from command line and run set of benchmarks specified.
-   */
-  public int run(String[] argv) throws IOException {
-    JobConf job = new JobConf(getConf());
-    EnumSet<CCodec> cc = null;
-    EnumSet<CType> ct = null;
-    EnumSet<Format> f = null;
-    EnumSet<RW> rw = null;
-    Path root = null;
-    FileSystem fs = FileSystem.get(job);
-    for(int i = 0; i < argv.length; ++i) {
-      try {
-        if ("-dir".equals(argv[i])) {
-          root = new Path(argv[++i]).makeQualified(fs);
-          System.out.println("DIR: " + root.toString());
-        } else if ("-seed".equals(argv[i])) {
-          job.setLong("filebench.seed", Long.valueOf(argv[++i]));
-        } else if (argv[i].startsWith("-no")) {
-          String arg = argv[i].substring(3);
-          cc = rem(CCodec.class, cc, arg);
-          ct = rem(CType.class, ct, arg);
-          f =  rem(Format.class, f, arg);
-          rw = rem(RW.class, rw, arg);
-        } else {
-          String arg = argv[i].substring(1);
-          cc = add(CCodec.class, cc, arg);
-          ct = add(CType.class, ct, arg);
-          f =  add(Format.class, f, arg);
-          rw = add(RW.class, rw, arg);
-        }
-      } catch (Exception e) {
-        throw (IOException)new IOException().initCause(e);
-      }
-    }
-    if (null == root) {
-      System.out.println("Missing -dir param");
-      printUsage();
-      return -1;
-    }
-
-    fillBlocks(job);
-    job.setOutputKeyClass(Text.class);
-    job.setOutputValueClass(Text.class);
-    FileInputFormat.setInputPaths(job, root);
-    FileOutputFormat.setOutputPath(job, root);
-
-    if (null == cc) cc = EnumSet.allOf(CCodec.class);
-    if (null == ct) ct = EnumSet.allOf(CType.class);
-    if (null == f)  f  = EnumSet.allOf(Format.class);
-    if (null == rw) rw = EnumSet.allOf(RW.class);
-    for (RW rwop : rw) {
-      for (Format fmt : f) {
-        fmt.configure(job);
-        for (CCodec cod : cc) {
-          cod.configure(job);
-          if (!(fmt == Format.txt || cod == CCodec.pln)) {
-            for (CType typ : ct) {
-              String fn =
-                fmt.name().toUpperCase() + "_" +
-                cod.name().toUpperCase() + "_" +
-                typ.name().toUpperCase();
-              typ.configure(job);
-              System.out.print(rwop.name().toUpperCase() + " " + fn + ": ");
-              System.out.println(rwop.exec(fn, job) / 1000 +
-                  " seconds");
-            }
-          } else {
-            String fn =
-              fmt.name().toUpperCase() + "_" +
-              cod.name().toUpperCase();
-            Path p = new Path(root, fn);
-            if (rwop == RW.r && !fs.exists(p)) {
-              fn += cod.getExt();
-            }
-            System.out.print(rwop.name().toUpperCase() + " " + fn + ": ");
-            System.out.println(rwop.exec(fn, job) / 1000 +
-                " seconds");
-          }
-        }
-      }
-    }
-    return 0;
-  }
-
-  // overwrought argument processing and wordlist follow
-  enum CCodec {
-    zip(GzipCodec.class, ".gz"), pln(null, "");
-
-    Class<? extends CompressionCodec> inf;
-    String ext;
-    CCodec(Class<? extends CompressionCodec> inf, String ext) {
-      this.inf = inf;
-      this.ext = ext;
-    }
-    public void configure(JobConf job) {
-      if (inf != null) {
-        job.setBoolean("mapred.output.compress", true);
-        job.setClass("mapred.output.compression.codec", inf,
-            CompressionCodec.class);
-      } else {
-        job.setBoolean("mapred.output.compress", false);
-      }
-    }
-    public String getExt() { return ext; }
-  }
-  enum CType {
-    blk("BLOCK"),
-    rec("RECORD");
-
-    String typ;
-    CType(String typ) { this.typ = typ; }
-    public void configure(JobConf job) {
-      job.set("mapred.map.output.compression.type", typ);
-      job.set("mapred.output.compression.type", typ);
-    }
-  }
-  enum Format {
-    seq(SequenceFileInputFormat.class, SequenceFileOutputFormat.class),
-    txt(TextInputFormat.class, TextOutputFormat.class);
-
-    Class<? extends InputFormat> inf;
-    Class<? extends OutputFormat> of;
-    Format(Class<? extends InputFormat> inf, Class<? extends OutputFormat> of) {
-      this.inf = inf;
-      this.of = of;
-    }
-    public void configure(JobConf job) {
-      if (null != inf) job.setInputFormat(inf);
-      if (null != of) job.setOutputFormat(of);
-    }
-  }
-  enum RW {
-    w() {
-      public long exec(String fn, JobConf job) throws IOException {
-        job.set("test.filebench.name", fn);
-        return writeBench(job);
-      }
-    },
-
-    r() {
-      public long exec(String fn, JobConf job) throws IOException {
-        job.set("test.filebench.name", fn);
-        return readBench(job);
-      }
-    };
-
-    public abstract long exec(String fn, JobConf job) throws IOException;
-  }
-  static Map<Class<? extends Enum>, Map<String,? extends Enum>> fullmap
-    = new HashMap<Class<? extends Enum>, Map<String,? extends Enum>>();
-  static {
-    // can't use Enum.valueOf here: it throws for unknown names, and
-    // unmatched option strings must simply be ignored
-    Map<String,CCodec> m1 = new HashMap<String,CCodec>();
-    for (CCodec v : CCodec.values()) m1.put(v.name(), v);
-    fullmap.put(CCodec.class, m1);
-    Map<String,CType> m2 = new HashMap<String,CType>();
-    for (CType v : CType.values()) m2.put(v.name(), v);
-    fullmap.put(CType.class, m2);
-    Map<String,Format> m3 = new HashMap<String,Format>();
-    for (Format v : Format.values()) m3.put(v.name(), v);
-    fullmap.put(Format.class, m3);
-    Map<String,RW> m4 = new HashMap<String,RW>();
-    for (RW v : RW.values()) m4.put(v.name(), v);
-    fullmap.put(RW.class, m4);
-  }
-
-  public static <T extends Enum<T>> EnumSet<T> rem(Class<T> c,
-      EnumSet<T> set, String s) {
-    if (null != fullmap.get(c) && fullmap.get(c).get(s) != null) {
-      if (null == set) {
-        set = EnumSet.allOf(c);
-      }
-      set.remove(fullmap.get(c).get(s));
-    }
-    return set;
-  }
-
-  @SuppressWarnings("unchecked")
-  public static <T extends Enum<T>> EnumSet<T> add(Class<T> c,
-      EnumSet<T> set, String s) {
-    if (null != fullmap.get(c) && fullmap.get(c).get(s) != null) {
-      if (null == set) {
-        set = EnumSet.noneOf(c);
-      }
-      set.add((T)fullmap.get(c).get(s));
-    }
-    return set;
-  }
-
-  /**
-   * A random list of 1000 words from /usr/share/dict/words
-   */
-  private static final String[] words = {
-    "diurnalness", "Homoiousian", "spiranthic", "tetragynian",
-    "silverhead", "ungreat", "lithograph", "exploiter",
-    "physiologian", "by", "hellbender", "Filipendula",
-    "undeterring", "antiscolic", "pentagamist", "hypoid",
-    "cacuminal", "sertularian", "schoolmasterism", "nonuple",
-    "gallybeggar", "phytonic", "swearingly", "nebular",
-    "Confervales", "thermochemically", "characinoid", "cocksuredom",
-    "fallacious", "feasibleness", "debromination", "playfellowship",
-    "tramplike", "testa", "participatingly", "unaccessible",
-    "bromate", "experientialist", "roughcast", "docimastical",
-    "choralcelo", "blightbird", "peptonate", "sombreroed",
-    "unschematized", "antiabolitionist", "besagne", "mastication",
-    "bromic", "sviatonosite", "cattimandoo", "metaphrastical",
-    "endotheliomyoma", "hysterolysis", "unfulminated", "Hester",
-    "oblongly", "blurredness", "authorling", "chasmy",
-    "Scorpaenidae", "toxihaemia", "Dictograph", "Quakerishly",
-    "deaf", "timbermonger", "strammel", "Thraupidae",
-    "seditious", "plerome", "Arneb", "eristically",
-    "serpentinic", "glaumrie", "socioromantic", "apocalypst",
-    "tartrous", "Bassaris", "angiolymphoma", "horsefly",
-    "kenno", "astronomize", "euphemious", "arsenide",
-    "untongued", "parabolicness", "uvanite", "helpless",
-    "gemmeous", "stormy", "templar", "erythrodextrin",
-    "comism", "interfraternal", "preparative", "parastas",
-    "frontoorbital", "Ophiosaurus", "diopside", "serosanguineous",
-    "ununiformly", "karyological", "collegian", "allotropic",
-    "depravity", "amylogenesis", "reformatory", "epidymides",
-    "pleurotropous", "trillium", "dastardliness", "coadvice",
-    "embryotic", "benthonic", "pomiferous", "figureheadship",
-    "Megaluridae", "Harpa", "frenal", "commotion",
-    "abthainry", "cobeliever", "manilla", "spiciferous",
-    "nativeness", "obispo", "monilioid", "biopsic",
-    "valvula", "enterostomy", "planosubulate", "pterostigma",
-    "lifter", "triradiated", "venialness", "tum",
-    "archistome", "tautness", "unswanlike", "antivenin",
-    "Lentibulariaceae", "Triphora", "angiopathy", "anta",
-    "Dawsonia", "becomma", "Yannigan", "winterproof",
-    "antalgol", "harr", "underogating", "ineunt",
-    "cornberry", "flippantness", "scyphostoma", "approbation",
-    "Ghent", "Macraucheniidae", "scabbiness", "unanatomized",
-    "photoelasticity", "eurythermal", "enation", "prepavement",
-    "flushgate", "subsequentially", "Edo", "antihero",
-    "Isokontae", "unforkedness", "porriginous", "daytime",
-    "nonexecutive", "trisilicic", "morphiomania", "paranephros",
-    "botchedly", "impugnation", "Dodecatheon", "obolus",
-    "unburnt", "provedore", "Aktistetae", "superindifference",
-    "Alethea", "Joachimite", "cyanophilous", "chorograph",
-    "brooky", "figured", "periclitation", "quintette",
-    "hondo", "ornithodelphous", "unefficient", "pondside",
-    "bogydom", "laurinoxylon", "Shiah", "unharmed",
-    "cartful", "noncrystallized", "abusiveness", "cromlech",
-    "japanned", "rizzomed", "underskin", "adscendent",
-    "allectory", "gelatinousness", "volcano", "uncompromisingly",
-    "cubit", "idiotize", "unfurbelowed", "undinted",
-    "magnetooptics", "Savitar", "diwata", "ramosopalmate",
-    "Pishquow", "tomorn", "apopenptic", "Haversian",
-    "Hysterocarpus", "ten", "outhue", "Bertat",
-    "mechanist", "asparaginic", "velaric", "tonsure",
-    "bubble", "Pyrales", "regardful", "glyphography",
-    "calabazilla", "shellworker", "stradametrical", "havoc",
-    "theologicopolitical", "sawdust", "diatomaceous", "jajman",
-    "temporomastoid", "Serrifera", "Ochnaceae", "aspersor",
-    "trailmaking", "Bishareen", "digitule", "octogynous",
-    "epididymitis", "smokefarthings", "bacillite", "overcrown",
-    "mangonism", "sirrah", "undecorated", "psychofugal",
-    "bismuthiferous", "rechar", "Lemuridae", "frameable",
-    "thiodiazole", "Scanic", "sportswomanship", "interruptedness",
-    "admissory", "osteopaedion", "tingly", "tomorrowness",
-    "ethnocracy", "trabecular", "vitally", "fossilism",
-    "adz", "metopon", "prefatorial", "expiscate",
-    "diathermacy", "chronist", "nigh", "generalizable",
-    "hysterogen", "aurothiosulphuric", "whitlowwort", "downthrust",
-    "Protestantize", "monander", "Itea", "chronographic",
-    "silicize", "Dunlop", "eer", "componental",
-    "spot", "pamphlet", "antineuritic", "paradisean",
-    "interruptor", "debellator", "overcultured", "Florissant",
-    "hyocholic", "pneumatotherapy", "tailoress", "rave",
-    "unpeople", "Sebastian", "thermanesthesia", "Coniferae",
-    "swacking", "posterishness", "ethmopalatal", "whittle",
-    "analgize", "scabbardless", "naught", "symbiogenetically",
-    "trip", "parodist", "columniform", "trunnel",
-    "yawler", "goodwill", "pseudohalogen", "swangy",
-    "cervisial", "mediateness", "genii", "imprescribable",
-    "pony", "consumptional", "carposporangial", "poleax",
-    "bestill", "subfebrile", "sapphiric", "arrowworm",
-    "qualminess", "ultraobscure", "thorite", "Fouquieria",
-    "Bermudian", "prescriber", "elemicin", "warlike",
-    "semiangle", "rotular", "misthread", "returnability",
-    "seraphism", "precostal", "quarried", "Babylonism",
-    "sangaree", "seelful", "placatory", "pachydermous",
-    "bozal", "galbulus", "spermaphyte", "cumbrousness",
-    "pope", "signifier", "Endomycetaceae", "shallowish",
-    "sequacity", "periarthritis", "bathysphere", "pentosuria",
-    "Dadaism", "spookdom", "Consolamentum", "afterpressure",
-    "mutter", "louse", "ovoviviparous", "corbel",
-    "metastoma", "biventer", "Hydrangea", "hogmace",
-    "seizing", "nonsuppressed", "oratorize", "uncarefully",
-    "benzothiofuran", "penult", "balanocele", "macropterous",
-    "dishpan", "marten", "absvolt", "jirble",
-    "parmelioid", "airfreighter", "acocotl", "archesporial",
-    "hypoplastral", "preoral", "quailberry", "cinque",
-    "terrestrially", "stroking", "limpet", "moodishness",
-    "canicule", "archididascalian", "pompiloid", "overstaid",
-    "introducer", "Italical", "Christianopaganism", "prescriptible",
-    "subofficer", "danseuse", "cloy", "saguran",
-    "frictionlessly", "deindividualization", "Bulanda", "ventricous",
-    "subfoliar", "basto", "scapuloradial", "suspend",
-    "stiffish", "Sphenodontidae", "eternal", "verbid",
-    "mammonish", "upcushion", "barkometer", "concretion",
-    "preagitate", "incomprehensible", "tristich", "visceral",
-    "hemimelus", "patroller", "stentorophonic", "pinulus",
-    "kerykeion", "brutism", "monstership", "merciful",
-    "overinstruct", "defensibly", "bettermost", "splenauxe",
-    "Mormyrus", "unreprimanded", "taver", "ell",
-    "proacquittal", "infestation", "overwoven", "Lincolnlike",
-    "chacona", "Tamil", "classificational", "lebensraum",
-    "reeveland", "intuition", "Whilkut", "focaloid",
-    "Eleusinian", "micromembrane", "byroad", "nonrepetition",
-    "bacterioblast", "brag", "ribaldrous", "phytoma",
-    "counteralliance", "pelvimetry", "pelf", "relaster",
-    "thermoresistant", "aneurism", "molossic", "euphonym",
-    "upswell", "ladhood", "phallaceous", "inertly",
-    "gunshop", "stereotypography", "laryngic", "refasten",
-    "twinling", "oflete", "hepatorrhaphy", "electrotechnics",
-    "cockal", "guitarist", "topsail", "Cimmerianism",
-    "larklike", "Llandovery", "pyrocatechol", "immatchable",
-    "chooser", "metrocratic", "craglike", "quadrennial",
-    "nonpoisonous", "undercolored", "knob", "ultratense",
-    "balladmonger", "slait", "sialadenitis", "bucketer",
-    "magnificently", "unstipulated", "unscourged", "unsupercilious",
-    "packsack", "pansophism", "soorkee", "percent",
-    "subirrigate", "champer", "metapolitics", "spherulitic",
-    "involatile", "metaphonical", "stachyuraceous", "speckedness",
-    "bespin", "proboscidiform", "gul", "squit",
-    "yeelaman", "peristeropode", "opacousness", "shibuichi",
-    "retinize", "yote", "misexposition", "devilwise",
-    "pumpkinification", "vinny", "bonze", "glossing",
-    "decardinalize", "transcortical", "serphoid", "deepmost",
-    "guanajuatite", "wemless", "arval", "lammy",
-    "Effie", "Saponaria", "tetrahedral", "prolificy",
-    "excerpt", "dunkadoo", "Spencerism", "insatiately",
-    "Gilaki", "oratorship", "arduousness", "unbashfulness",
-    "Pithecolobium", "unisexuality", "veterinarian", "detractive",
-    "liquidity", "acidophile", "proauction", "sural",
-    "totaquina", "Vichyite", "uninhabitedness", "allegedly",
-    "Gothish", "manny", "Inger", "flutist",
-    "ticktick", "Ludgatian", "homotransplant", "orthopedical",
-    "diminutively", "monogoneutic", "Kenipsim", "sarcologist",
-    "drome", "stronghearted", "Fameuse", "Swaziland",
-    "alen", "chilblain", "beatable", "agglomeratic",
-    "constitutor", "tendomucoid", "porencephalous", "arteriasis",
-    "boser", "tantivy", "rede", "lineamental",
-    "uncontradictableness", "homeotypical", "masa", "folious",
-    "dosseret", "neurodegenerative", "subtransverse", "Chiasmodontidae",
-    "palaeotheriodont", "unstressedly", "chalcites", "piquantness",
-    "lampyrine", "Aplacentalia", "projecting", "elastivity",
-    "isopelletierin", "bladderwort", "strander", "almud",
-    "iniquitously", "theologal", "bugre", "chargeably",
-    "imperceptivity", "meriquinoidal", "mesophyte", "divinator",
-    "perfunctory", "counterappellant", "synovial", "charioteer",
-    "crystallographical", "comprovincial", "infrastapedial", "pleasurehood",
-    "inventurous", "ultrasystematic", "subangulated", "supraoesophageal",
-    "Vaishnavism", "transude", "chrysochrous", "ungrave",
-    "reconciliable", "uninterpleaded", "erlking", "wherefrom",
-    "aprosopia", "antiadiaphorist", "metoxazine", "incalculable",
-    "umbellic", "predebit", "foursquare", "unimmortal",
-    "nonmanufacture", "slangy", "predisputant", "familist",
-    "preaffiliate", "friarhood", "corelysis", "zoonitic",
-    "halloo", "paunchy", "neuromimesis", "aconitine",
-    "hackneyed", "unfeeble", "cubby", "autoschediastical",
-    "naprapath", "lyrebird", "inexistency", "leucophoenicite",
-    "ferrogoslarite", "reperuse", "uncombable", "tambo",
-    "propodiale", "diplomatize", "Russifier", "clanned",
-    "corona", "michigan", "nonutilitarian", "transcorporeal",
-    "bought", "Cercosporella", "stapedius", "glandularly",
-    "pictorially", "weism", "disilane", "rainproof",
-    "Caphtor", "scrubbed", "oinomancy", "pseudoxanthine",
-    "nonlustrous", "redesertion", "Oryzorictinae", "gala",
-    "Mycogone", "reappreciate", "cyanoguanidine", "seeingness",
-    "breadwinner", "noreast", "furacious", "epauliere",
-    "omniscribent", "Passiflorales", "uninductive", "inductivity",
-    "Orbitolina", "Semecarpus", "migrainoid", "steprelationship",
-    "phlogisticate", "mesymnion", "sloped", "edificator",
-    "beneficent", "culm", "paleornithology", "unurban",
-    "throbless", "amplexifoliate", "sesquiquintile", "sapience",
-    "astucious", "dithery", "boor", "ambitus",
-    "scotching", "uloid", "uncompromisingness", "hoove",
-    "waird", "marshiness", "Jerusalem", "mericarp",
-    "unevoked", "benzoperoxide", "outguess", "pyxie",
-    "hymnic", "euphemize", "mendacity", "erythremia",
-    "rosaniline", "unchatteled", "lienteria", "Bushongo",
-    "dialoguer", "unrepealably", "rivethead", "antideflation",
-    "vinegarish", "manganosiderite", "doubtingness", "ovopyriform",
-    "Cephalodiscus", "Muscicapa", "Animalivora", "angina",
-    "planispheric", "ipomoein", "cuproiodargyrite", "sandbox",
-    "scrat", "Munnopsidae", "shola", "pentafid",
-    "overstudiousness", "times", "nonprofession", "appetible",
-    "valvulotomy", "goladar", "uniarticular", "oxyterpene",
-    "unlapsing", "omega", "trophonema", "seminonflammable",
-    "circumzenithal", "starer", "depthwise", "liberatress",
-    "unleavened", "unrevolting", "groundneedle", "topline",
-    "wandoo", "umangite", "ordinant", "unachievable",
-    "oversand", "snare", "avengeful", "unexplicit",
-    "mustafina", "sonable", "rehabilitative", "eulogization",
-    "papery", "technopsychology", "impressor", "cresylite",
-    "entame", "transudatory", "scotale", "pachydermatoid",
-    "imaginary", "yeat", "slipped", "stewardship",
-    "adatom", "cockstone", "skyshine", "heavenful",
-    "comparability", "exprobratory", "dermorhynchous", "parquet",
-    "cretaceous", "vesperal", "raphis", "undangered",
-    "Glecoma", "engrain", "counteractively", "Zuludom",
-    "orchiocatabasis", "Auriculariales", "warriorwise", "extraorganismal",
-    "overbuilt", "alveolite", "tetchy", "terrificness",
-    "widdle", "unpremonished", "rebilling", "sequestrum",
-    "equiconvex", "heliocentricism", "catabaptist", "okonite",
-    "propheticism", "helminthagogic", "calycular", "giantly",
-    "wingable", "golem", "unprovided", "commandingness",
-    "greave", "haply", "doina", "depressingly",
-    "subdentate", "impairment", "decidable", "neurotrophic",
-    "unpredict", "bicorporeal", "pendulant", "flatman",
-    "intrabred", "toplike", "Prosobranchiata", "farrantly",
-    "toxoplasmosis", "gorilloid", "dipsomaniacal", "aquiline",
-    "atlantite", "ascitic", "perculsive", "prospectiveness",
-    "saponaceous", "centrifugalization", "dinical", "infravaginal",
-    "beadroll", "affaite", "Helvidian", "tickleproof",
-    "abstractionism", "enhedge", "outwealth", "overcontribute",
-    "coldfinch", "gymnastic", "Pincian", "Munychian",
-    "codisjunct", "quad", "coracomandibular", "phoenicochroite",
-    "amender", "selectivity", "putative", "semantician",
-    "lophotrichic", "Spatangoidea", "saccharogenic", "inferent",
-    "Triconodonta", "arrendation", "sheepskin", "taurocolla",
-    "bunghole", "Machiavel", "triakistetrahedral", "dehairer",
-    "prezygapophysial", "cylindric", "pneumonalgia", "sleigher",
-    "emir", "Socraticism", "licitness", "massedly",
-    "instructiveness", "sturdied", "redecrease", "starosta",
-    "evictor", "orgiastic", "squdge", "meloplasty",
-    "Tsonecan", "repealableness", "swoony", "myesthesia",
-    "molecule", "autobiographist", "reciprocation", "refective",
-    "unobservantness", "tricae", "ungouged", "floatability",
-    "Mesua", "fetlocked", "chordacentrum", "sedentariness",
-    "various", "laubanite", "nectopod", "zenick",
-    "sequentially", "analgic", "biodynamics", "posttraumatic",
-    "nummi", "pyroacetic", "bot", "redescend",
-    "dispermy", "undiffusive", "circular", "trillion",
-    "Uraniidae", "ploration", "discipular", "potentness",
-    "sud", "Hu", "Eryon", "plugger",
-    "subdrainage", "jharal", "abscission", "supermarket",
-    "countergabion", "glacierist", "lithotresis", "minniebush",
-    "zanyism", "eucalypteol", "sterilely", "unrealize",
-    "unpatched", "hypochondriacism", "critically", "cheesecutter",
-  };
-}

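The add/rem helpers above implement the "implicitly enabled unless named" flag semantics described in printUsage: the first positive flag switches a set from all-of to none-of-plus-that-flag, while a -no flag subtracts from the full set. A minimal standalone sketch of the same EnumSet pattern (hypothetical demo class, not part of FileBench):

  import java.util.EnumSet;

  public class FlagSetDemo {
    enum CCodec { zip, pln }

    public static void main(String[] args) {
      // No flags given yet: null stands for "everything enabled".
      EnumSet<CCodec> cc = null;

      // "-zip": switch to an empty set, then add only what was named.
      if (cc == null) cc = EnumSet.noneOf(CCodec.class);
      cc.add(CCodec.zip);
      System.out.println(cc);                 // prints [zip]

      // "-nopln" with no positive flags: subtract from the full set.
      EnumSet<CCodec> dflt = EnumSet.allOf(CCodec.class);
      dflt.remove(CCodec.pln);
      System.out.println(dflt);               // prints [zip]
    }
  }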
+ 0 - 98
src/test/hdfs-with-mr/org/apache/hadoop/io/TestSequenceFileMergeProgress.java

@@ -1,98 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.io;
-
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.hadoop.fs.*;
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.SequenceFile.Sorter.RawKeyValueIterator;
-import org.apache.hadoop.io.SequenceFile.Sorter.SegmentDescriptor;
-import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.io.compress.DefaultCodec;
-import org.apache.hadoop.mapred.*;
-
-import junit.framework.TestCase;
-import org.apache.commons.logging.*;
-
-public class TestSequenceFileMergeProgress extends TestCase {
-  private static final Log LOG = FileInputFormat.LOG;
-  private static final int RECORDS = 10000;
-  
-  public void testMergeProgressWithNoCompression() throws IOException {
-    runTest(SequenceFile.CompressionType.NONE);
-  }
-
-  public void testMergeProgressWithRecordCompression() throws IOException {
-    runTest(SequenceFile.CompressionType.RECORD);
-  }
-
-  public void testMergeProgressWithBlockCompression() throws IOException {
-    runTest(SequenceFile.CompressionType.BLOCK);
-  }
-
-  public void runTest(CompressionType compressionType) throws IOException {
-    JobConf job = new JobConf();
-    FileSystem fs = FileSystem.getLocal(job);
-    Path dir = new Path(System.getProperty("test.build.data",".") + "/mapred");
-    Path file = new Path(dir, "test.seq");
-    Path tempDir = new Path(dir, "tmp");
-
-    fs.delete(dir, true);
-    FileInputFormat.setInputPaths(job, dir);
-    fs.mkdirs(tempDir);
-
-    LongWritable tkey = new LongWritable();
-    Text tval = new Text();
-
-    SequenceFile.Writer writer =
-      SequenceFile.createWriter(fs, job, file, LongWritable.class, Text.class,
-        compressionType, new DefaultCodec());
-    try {
-      for (int i = 0; i < RECORDS; ++i) {
-        tkey.set(1234);
-        tval.set("valuevaluevaluevaluevaluevaluevaluevaluevaluevaluevalue");
-        writer.append(tkey, tval);
-      }
-    } finally {
-      writer.close();
-    }
-    
-    long fileLength = fs.getFileStatus(file).getLen();
-    LOG.info("With compression = " + compressionType + ": "
-        + "compressed length = " + fileLength);
-    
-    SequenceFile.Sorter sorter = new SequenceFile.Sorter(fs, 
-        job.getOutputKeyComparator(), job.getMapOutputKeyClass(),
-        job.getMapOutputValueClass(), job);
-    Path[] paths = new Path[] {file};
-    RawKeyValueIterator rIter = sorter.merge(paths, tempDir, false);
-    int count = 0;
-    while (rIter.next()) {
-      count++;
-    }
-    assertEquals(RECORDS, count);
-    assertEquals(1.0f, rIter.getProgress().get());
-  }
-
-}

+ 0 - 197
src/test/hdfs-with-mr/org/apache/hadoop/ipc/TestSocketFactory.java

@@ -1,197 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.net.Socket;
-import java.net.SocketAddress;
-
-import junit.framework.TestCase;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.mapred.JobClient;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.JobStatus;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.net.StandardSocketFactory;
-
-/**
- * This class checks that RPCs can use specialized socket factories.
- */
-public class TestSocketFactory extends TestCase {
-
-  /**
-   * Check that we can reach a NameNode or a JobTracker using a specific
-   * socket factory
-   */
-  public void testSocketFactory() throws IOException {
-    // Create a standard mini-cluster
-    Configuration sconf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster(sconf, 1, true, null);
-    final int nameNodePort = cluster.getNameNodePort();
-
-    // Get a reference to its DFS directly
-    FileSystem fs = cluster.getFileSystem();
-    assertTrue(fs instanceof DistributedFileSystem);
-    DistributedFileSystem directDfs = (DistributedFileSystem) fs;
-
-    // Get another reference via network using a specific socket factory
-    Configuration cconf = new Configuration();
-    FileSystem.setDefaultUri(cconf, String.format("hdfs://localhost:%s/",
-        nameNodePort + 10));
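-    // Note: this URI deliberately points at nameNodePort + 10; only the
-    // DummySocketFactory below, which subtracts 10 on connect, can reach
-    // the real NameNode, so a successful RPC proves the factory is in use.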
-    cconf.set("hadoop.rpc.socket.factory.class.default",
-        "org.apache.hadoop.ipc.DummySocketFactory");
-    cconf.set("hadoop.rpc.socket.factory.class.ClientProtocol",
-        "org.apache.hadoop.ipc.DummySocketFactory");
-    cconf.set("hadoop.rpc.socket.factory.class.JobSubmissionProtocol",
-        "org.apache.hadoop.ipc.DummySocketFactory");
-
-    fs = FileSystem.get(cconf);
-    assertTrue(fs instanceof DistributedFileSystem);
-    DistributedFileSystem dfs = (DistributedFileSystem) fs;
-
-    JobClient client = null;
-    MiniMRCluster mr = null;
-    try {
-      // This will test RPC to the NameNode only.
-      // could we test Client-DataNode connections?
-      Path filePath = new Path("/dir");
-
-      assertFalse(directDfs.exists(filePath));
-      assertFalse(dfs.exists(filePath));
-
-      directDfs.mkdirs(filePath);
-      assertTrue(directDfs.exists(filePath));
-      assertTrue(dfs.exists(filePath));
-
-      // This will test RPC to a JobTracker
-      fs = FileSystem.get(sconf);
-      mr = new MiniMRCluster(1, fs.getUri().toString(), 1);
-      final int jobTrackerPort = mr.getJobTrackerPort();
-
-      JobConf jconf = new JobConf(cconf);
-      jconf.set("mapred.job.tracker", String.format("localhost:%d",
-          jobTrackerPort + 10));
-      client = new JobClient(jconf);
-
-      JobStatus[] jobs = client.jobsToComplete();
-      assertTrue(jobs.length == 0);
-
-    } finally {
-      try {
-        if (client != null)
-          client.close();
-      } catch (Exception ignored) {
-        // nothing we can do
-        ignored.printStackTrace();
-      }
-      try {
-        if (dfs != null)
-          dfs.close();
-
-      } catch (Exception ignored) {
-        // nothing we can do
-        ignored.printStackTrace();
-      }
-      try {
-        if (directDfs != null)
-          directDfs.close();
-
-      } catch (Exception ignored) {
-        // nothing we can do
-        ignored.printStackTrace();
-      }
-      try {
-        if (cluster != null)
-          cluster.shutdown();
-
-      } catch (Exception ignored) {
-        // nothing we can do
-        ignored.printStackTrace();
-      }
-      if (mr != null) {
-        try {
-          mr.shutdown();
-        } catch (Exception ignored) {
-          ignored.printStackTrace();
-        }
-      }
-    }
-  }
-}
-
-/**
- * Dummy socket factory which shifts TCP ports by subtracting 10 when
- * establishing a connection.
- */
-class DummySocketFactory extends StandardSocketFactory {
-  /**
-   * Default empty constructor (for use with the reflection API).
-   */
-  public DummySocketFactory() {
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public Socket createSocket() throws IOException {
-    return new Socket() {
-      @Override
-      public void connect(SocketAddress addr, int timeout)
-          throws IOException {
-
-        assert (addr instanceof InetSocketAddress);
-        InetSocketAddress iaddr = (InetSocketAddress) addr;
-        SocketAddress newAddr = null;
-        if (iaddr.isUnresolved())
-          newAddr =
-              new InetSocketAddress(iaddr.getHostName(),
-                  iaddr.getPort() - 10);
-        else
-          newAddr =
-              new InetSocketAddress(iaddr.getAddress(), iaddr.getPort() - 10);
-        System.out.printf("Test socket: rerouting %s to %s\n", iaddr,
-            newAddr);
-        super.connect(newAddr, timeout);
-      }
-    };
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public boolean equals(Object obj) {
-    if (this == obj)
-      return true;
-    if (obj == null)
-      return false;
-    if (!(obj instanceof DummySocketFactory))
-      return false;
-    return true;
-  }
-
-  /** {@inheritDoc} */
-  @Override
-  public int hashCode() {
-    // Dummy hash code (to make find bugs happy)
-    return 53;
-  }
-}

+ 0 - 152
src/test/hdfs-with-mr/org/apache/hadoop/security/authorize/TestServiceLevelAuthorization.java

@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security.authorize;
-
-import java.io.File;
-import java.io.FileWriter;
-import java.io.IOException;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.HDFSPolicyProvider;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.tools.DFSAdmin;
-import org.apache.hadoop.ipc.RemoteException;
-import org.apache.hadoop.mapred.JobConf;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.mapred.TestMiniMRWithDFS;
-import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.util.StringUtils;
-
-import junit.framework.TestCase;
-
-public class TestServiceLevelAuthorization extends TestCase {
-  public void testServiceLevelAuthorization() throws Exception {
-    MiniDFSCluster dfs = null;
-    MiniMRCluster mr = null;
-    FileSystem fileSys = null;
-    try {
-      final int slaves = 4;
-
-      // Turn on service-level authorization
-      Configuration conf = new Configuration();
-      conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
-                    HadoopPolicyProvider.class, PolicyProvider.class);
-      conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
-                      true);
-      
-      // Start the mini clusters
-      dfs = new MiniDFSCluster(conf, slaves, true, null);
-      fileSys = dfs.getFileSystem();
-      JobConf mrConf = new JobConf(conf);
-      mr = new MiniMRCluster(slaves, fileSys.getUri().toString(), 1, 
-                             null, null, mrConf);
-
-      // Run examples
-      TestMiniMRWithDFS.runPI(mr, mr.createJobConf(mrConf));
-      TestMiniMRWithDFS.runWordCount(mr, mr.createJobConf(mrConf));
-    } finally {
-      if (dfs != null) { dfs.shutdown(); }
-      if (mr != null) { mr.shutdown();
-      }
-    }
-  }
-  
-  private static final String DUMMY_ACL = "nouser nogroup";
-  private static final String UNKNOWN_USER = "dev,null";
-  
-  private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
-    FileWriter fos = new FileWriter(policyFile);
-    PolicyProvider policyProvider = new HDFSPolicyProvider();
-    fos.write("<configuration>\n");
-    for (Service service : policyProvider.getServices()) {
-      String key = service.getServiceKey();
-      String value = "*";
-      if (key.equals("security.refresh.policy.protocol.acl")) {
-        value = DUMMY_ACL;
-      }
-      fos.write("<property><name>"+ key + "</name><value>" + value + 
-                "</value></property>\n");
-      System.err.println("<property><name>"+ key + "</name><value>" + value + 
-          "</value></property>\n");
-    }
-    fos.write("</configuration>\n");
-    fos.close();
-  }
-  
-  private void refreshPolicy(Configuration conf)  throws IOException {
-    DFSAdmin dfsAdmin = new DFSAdmin(conf);
-    dfsAdmin.refreshServiceAcl();
-  }
-  
-  public void testRefresh() throws Exception {
-    MiniDFSCluster dfs = null;
-    try {
-      final int slaves = 4;
-
-      // Turn on service-level authorization
-      Configuration conf = new Configuration();
-      conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG, 
-                    HDFSPolicyProvider.class, PolicyProvider.class);
-      conf.setBoolean(ServiceAuthorizationManager.SERVICE_AUTHORIZATION_CONFIG, 
-                      true);
-      
-      // Start the mini dfs cluster
-      dfs = new MiniDFSCluster(conf, slaves, true, null);
-
-      // Refresh the service level authorization policy
-      refreshPolicy(conf);
-      
-      // Simulate an 'edit' of hadoop-policy.xml
-      String confDir = System.getProperty("test.build.extraconf", 
-                                          "build/test/extraconf");
-      File policyFile = new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE);
-      String policyFileCopy = ConfiguredPolicy.HADOOP_POLICY_FILE + ".orig";
-      FileUtil.copy(policyFile, FileSystem.getLocal(conf),   // first save original 
-                    new Path(confDir, policyFileCopy), false, conf);
-      rewriteHadoopPolicyFile(                               // rewrite the file
-          new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
-      
-      // Refresh the service level authorization policy
-      refreshPolicy(conf);
-      
-      // Refresh the service level authorization policy once again, 
-      // this time it should fail!
-      try {
-        // Note: hadoop-policy.xml for tests has 
-        // security.refresh.policy.protocol.acl = ${user.name}
-        conf.set(UnixUserGroupInformation.UGI_PROPERTY_NAME, UNKNOWN_USER);
-        refreshPolicy(conf);
-        fail("Refresh of NameNode's policy file cannot be successful!");
-      } catch (RemoteException re) {
-        System.out.println("Good, refresh worked... refresh failed with: " + 
-                           StringUtils.stringifyException(re.unwrapRemoteException()));
-      } finally {
-        // Reset to original hadoop-policy.xml
-        FileUtil.fullyDelete(new File(confDir, 
-            ConfiguredPolicy.HADOOP_POLICY_FILE));
-        FileUtil.replaceFile(new File(confDir, policyFileCopy), new File(confDir, ConfiguredPolicy.HADOOP_POLICY_FILE));
-      }
-    } finally {
-      if (dfs != null) { dfs.shutdown(); }
-    }
-  }
-
-}

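For reference, rewriteHadoopPolicyFile above emits a policy file of roughly this shape (a sketch: the actual key set comes from HDFSPolicyProvider at runtime, and the client ACL key shown is illustrative; only the refresh ACL is replaced with DUMMY_ACL):

  <configuration>
  <property><name>security.client.protocol.acl</name><value>*</value></property>
  <property><name>security.refresh.policy.protocol.acl</name><value>nouser nogroup</value></property>
  </configuration>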
+ 0 - 46
src/test/hdfs-with-mr/org/apache/hadoop/test/AllTestDriver.java

@@ -1,46 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test;
-
-import org.apache.hadoop.util.ProgramDriver;
-
-
-@Deprecated
-// Class to be removed after the project split
-public class AllTestDriver {
-  
-  /**
-   * Entry point for running all of the tests from the test jar file.
-   */
-  public static void main(String argv[]){
-    ProgramDriver pd = new ProgramDriver();
-    new CoreTestDriver(pd);
-    new HdfsTestDriver(pd);
-    new HdfsWithMRTestDriver(pd);
-    new MapredTestDriver(pd);
-    
-    try {
-      pd.driver(argv);
-    } catch (Throwable e) {
-      e.printStackTrace();
-    }
-  }
-
-}
-

+ 0 - 75
src/test/hdfs-with-mr/org/apache/hadoop/test/HdfsWithMRTestDriver.java

@@ -1,75 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.test;
-
-import org.apache.hadoop.fs.DFSCIOTest;
-import org.apache.hadoop.fs.DistributedFSCheck;
-import org.apache.hadoop.fs.TestDFSIO;
-import org.apache.hadoop.fs.TestFileSystem;
-import org.apache.hadoop.hdfs.NNBench;
-import org.apache.hadoop.io.FileBench;
-import org.apache.hadoop.util.ProgramDriver;
-
-/*
- * Driver for HDFS tests that require MapReduce to run.
- */
-public class HdfsWithMRTestDriver {
-  
-  
-  private ProgramDriver pgd;
-
-  public HdfsWithMRTestDriver() {
-    this(new ProgramDriver());
-  }
-  
-  public HdfsWithMRTestDriver(ProgramDriver pgd) {
-    this.pgd = pgd;
-    try {
-      pgd.addClass("nnbench", NNBench.class, 
-          "A benchmark that stresses the namenode.");
-      pgd.addClass("testfilesystem", TestFileSystem.class, 
-          "A test for FileSystem read/write.");
-      pgd.addClass("TestDFSIO", TestDFSIO.class, 
-          "Distributed i/o benchmark.");
-      pgd.addClass("DFSCIOTest", DFSCIOTest.class, "" +
-          "Distributed i/o benchmark of libhdfs.");
-      pgd.addClass("DistributedFSCheck", DistributedFSCheck.class, 
-          "Distributed checkup of the file system consistency.");
-      pgd.addClass("filebench", FileBench.class, 
-          "Benchmark SequenceFile(Input|Output)Format " +
-          "(block,record compressed and uncompressed), " +
-          "Text(Input|Output)Format (compressed and uncompressed)");
-    } catch(Throwable e) {
-      e.printStackTrace();
-    }
-  }
-
-  public void run(String argv[]) {
-    try {
-      pgd.driver(argv);
-    } catch(Throwable e) {
-      e.printStackTrace();
-    }
-  }
-
-  public static void main(String argv[]){
-    new HdfsWithMRTestDriver().run(argv);
-  }
-}
-

+ 0 - 221
src/test/hdfs-with-mr/org/apache/hadoop/tools/TestDistCh.java

@@ -1,221 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.tools;
-
-import java.io.ByteArrayOutputStream;
-import java.io.DataOutputStream;
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Arrays;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.Random;
-
-import org.apache.commons.logging.LogFactory;
-import org.apache.commons.logging.impl.Log4JLogger;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FsShell;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.permission.PermissionStatus;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
-import org.apache.hadoop.io.IOUtils;
-import org.apache.hadoop.mapred.MiniMRCluster;
-import org.apache.hadoop.mapred.TaskTracker;
-import org.apache.log4j.Level;
-
-public class TestDistCh extends junit.framework.TestCase {
-  {
-    ((Log4JLogger)LogFactory.getLog("org.apache.hadoop.hdfs.StateChange")
-        ).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)DataNode.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)FSNamesystem.LOG).getLogger().setLevel(Level.OFF);
-    ((Log4JLogger)TaskTracker.LOG).getLogger().setLevel(Level.OFF);
-  }
-
-  static final Long RANDOM_NUMBER_GENERATOR_SEED = null;
-
-  private static final Random RANDOM = new Random();
-  static {
-    final long seed = RANDOM_NUMBER_GENERATOR_SEED == null?
-        RANDOM.nextLong(): RANDOM_NUMBER_GENERATOR_SEED;
-    System.out.println("seed=" + seed);
-    RANDOM.setSeed(seed);
-  }
-
-  static final String TEST_ROOT_DIR =
-    new Path(System.getProperty("test.build.data","/tmp")
-        ).toString().replace(' ', '+');
-
-  static final int NUN_SUBS = 5;
-
-  static class FileTree {
-    private final FileSystem fs;
-    private final String root;
-    private final Path rootdir;
-    private int fcount = 0;
-
-    Path createSmallFile(Path dir) throws IOException {
-      final Path f = new Path(dir, "f" + ++fcount);
-      assertTrue(!fs.exists(f));
-      final DataOutputStream out = fs.create(f);
-      try {
-        out.writeBytes("createSmallFile: f=" + f);
-      } finally {
-        out.close();
-      }
-      assertTrue(fs.exists(f));
-      return f;
-    }
-
-    Path mkdir(Path dir) throws IOException {
-      assertTrue(fs.mkdirs(dir));
-      assertTrue(fs.getFileStatus(dir).isDir());
-      return dir;
-    }
-    
-    FileTree(FileSystem fs, String name) throws IOException {
-      this.fs = fs;
-      this.root = "/test/" + name;
-      this.rootdir = mkdir(new Path(root));
-  
-      for(int i = 0; i < 3; i++) {
-        createSmallFile(rootdir);
-      }
-      
-      for(int i = 0; i < NUN_SUBS; i++) {
-        final Path sub = mkdir(new Path(root, "sub" + i));
-        int num_files = RANDOM.nextInt(3);
-        for(int j = 0; j < num_files; j++) {
-          createSmallFile(sub);
-        }
-      }
-      
-      System.out.println("rootdir = " + rootdir);
-    }
-  }
-
-  static class ChPermissionStatus extends PermissionStatus {
-    ChPermissionStatus(FileStatus filestatus) {
-      this(filestatus, "", "", "");
-    }
-
-    ChPermissionStatus(FileStatus filestatus, String owner, String group, String permission) {
-      super("".equals(owner)? filestatus.getOwner(): owner, 
-          "".equals(group)? filestatus.getGroup(): group,
-          "".equals(permission)? filestatus.getPermission(): new FsPermission(Short.parseShort(permission, 8)));
-    }
-  }
-  
-  public void testDistCh() throws Exception {
-    final Configuration conf = new Configuration();
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
-    final FileSystem fs = cluster.getFileSystem();
-    final MiniMRCluster mr = new MiniMRCluster(2, fs.getUri().toString(), 1);
-    final FsShell shell = new FsShell(conf);
-    
-    try {
-      final FileTree tree = new FileTree(fs, "testDistCh");
-      final FileStatus rootstatus = fs.getFileStatus(tree.rootdir);
-
-      runLsr(shell, tree.root, 0);
-
-      //generate random arguments
-      final String[] args = new String[RANDOM.nextInt(NUN_SUBS-1) + 1];
-      final PermissionStatus[] newstatus = new PermissionStatus[NUN_SUBS];
-      final List<Integer> indices = new LinkedList<Integer>();
-      for(int i = 0; i < NUN_SUBS; i++) {
-        indices.add(i);
-      }
-      for(int i = 0; i < args.length; i++) {
-        final int index = indices.remove(RANDOM.nextInt(indices.size()));
-        final String sub = "sub" + index;
-        final boolean changeOwner = RANDOM.nextBoolean();
-        final boolean changeGroup = RANDOM.nextBoolean();
-        // always change at least one of owner, group, or mode
-        final boolean changeMode = (!changeOwner && !changeGroup) || RANDOM.nextBoolean();
-        
-        final String owner = changeOwner? sub: "";
-        final String group = changeGroup? sub: "";
-        final String permission = changeMode? RANDOM.nextInt(8) + "" + RANDOM.nextInt(8) + "" + RANDOM.nextInt(8): "";
-
-        args[i] = tree.root + "/" + sub + ":" + owner + ":" + group + ":" + permission;
-        newstatus[index] = new ChPermissionStatus(rootstatus, owner, group, permission);
-      }
-      for(int i = 0; i < NUN_SUBS; i++) {
-        if (newstatus[i] == null) {
-          newstatus[i] = new ChPermissionStatus(rootstatus);
-        }
-      }
-      System.out.println("args=" + Arrays.asList(args).toString().replace(",", ",\n  "));
-      System.out.println("newstatus=" + Arrays.asList(newstatus).toString().replace(",", ",\n  "));
-
-      //run DistCh
-      new DistCh(mr.createJobConf()).run(args);
-      runLsr(shell, tree.root, 0);
-
-      //check results
-      for(int i = 0; i < NUN_SUBS; i++) {
-        Path sub = new Path(tree.root + "/sub" + i);
-        checkFileStatus(newstatus[i], fs.getFileStatus(sub));
-        for(FileStatus status : fs.listStatus(sub)) {
-          checkFileStatus(newstatus[i], status);
-        }
-      }
-    } finally {
-      mr.shutdown();
-      cluster.shutdown();
-    }
-  }
-
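-  // Files are created without execute bits, so expected file permissions must be checked with 0111 masked out (see checkFileStatus).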
-  static final FsPermission UMASK = FsPermission.createImmutable((short)0111);
-
-  static void checkFileStatus(PermissionStatus expected, FileStatus actual) {
-    assertEquals(expected.getUserName(), actual.getOwner());
-    assertEquals(expected.getGroupName(), actual.getGroup());
-    FsPermission perm = expected.getPermission(); 
-    if (!actual.isDir()) {
-      perm = perm.applyUMask(UMASK);
-    }
-    assertEquals(perm, actual.getPermission());
-  }
-
-  private static String runLsr(final FsShell shell, String root, int returnvalue
-      ) throws Exception {
-    System.out.println("root=" + root + ", returnvalue=" + returnvalue);
-    final ByteArrayOutputStream bytes = new ByteArrayOutputStream(); 
-    final PrintStream out = new PrintStream(bytes);
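-    // Temporarily redirect stdout/stderr so the output of "-lsr" can be captured and returned.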
-    final PrintStream oldOut = System.out;
-    final PrintStream oldErr = System.err;
-    System.setOut(out);
-    System.setErr(out);
-    final String results;
-    try {
-      assertEquals(returnvalue, shell.run(new String[]{"-lsr", root}));
-      results = bytes.toString();
-    } finally {
-      IOUtils.closeStream(out);
-      System.setOut(oldOut);
-      System.setErr(oldErr);
-    }
-    System.out.println("results:\n" + results);
-    return results;
-  }
-}

+ 0 - 18
src/test/mapred-site.xml

@@ -1,18 +0,0 @@
-<?xml version="1.0"?>
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
-
-<!-- Put site-specific property overrides in this file. -->
-
-<configuration>
-
-<property>
-  <name>io.sort.mb</name>
-  <value>10</value>
-</property>
-<property>
-  <name>mapred.hosts.exclude</name>
-  <value>hosts.exclude</value>
-  <description></description>
-</property>
-
-</configuration>

+ 0 - 404
src/webapps/datanode/browseBlock.jsp

@@ -1,404 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.net.*"
-
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.security.AccessToken"
-  import="org.apache.hadoop.security.AccessTokenHandler"
-  import="org.apache.hadoop.util.*"
-%>
-
-<%!
-  static final DataNode datanode = DataNode.getDataNode();
-
-  public void generateFileDetails(JspWriter out, HttpServletRequest req) 
-    throws IOException {
-
-    long startOffset = 0;
-    int datanodePort;
-
-    final Long blockId = JspHelper.validateLong(req.getParameter("blockId"));
-    if (blockId == null) {
-      out.print("Invalid input (blockId absent)");
-      return;
-    }
-
-    String datanodePortStr = req.getParameter("datanodePort");
-    if (datanodePortStr == null) {
-      out.print("Invalid input (datanodePort absent)");
-      return;
-    }
-    datanodePort = Integer.parseInt(datanodePortStr);
-
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-
-    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(req.getParameter("chunkSizeToView"));
-
-    String startOffsetStr = req.getParameter("startOffset");
-    if (startOffsetStr == null || Long.parseLong(startOffsetStr) < 0)
-      startOffset = 0;
-    else startOffset = Long.parseLong(startOffsetStr);
-    
-    final String filename = JspHelper.validatePath(
-        req.getParameter("filename"));
-    if (filename == null) {
-      out.print("Invalid input");
-      return;
-    }
-
-    String blockSizeStr = req.getParameter("blockSize"); 
-    long blockSize = 0;
-    if (blockSizeStr == null || blockSizeStr.length() == 0) {
-      out.print("Invalid input");
-      return;
-    } 
-    blockSize = Long.parseLong(blockSizeStr);
-
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    List<LocatedBlock> blocks = 
-      dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-    //Add the various links for looking at the file contents
-    //URL for downloading the full file
-    String downloadUrl = "http://" + req.getServerName() + ":" +
-                         req.getServerPort() + "/streamFile?filename=" +
-                         URLEncoder.encode(filename, "UTF-8");
-    out.print("<a name=\"viewOptions\"></a>");
-    out.print("<a href=\"" + downloadUrl + "\">Download this file</a><br>");
-    
-    DatanodeInfo chosenNode;
-    //URL for TAIL 
-    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
-    try {
-      chosenNode = JspHelper.bestNode(lastBlk);
-    } catch (IOException e) {
-      out.print(e.toString());
-      dfs.close();
-      return;
-    }
-    String fqdn = 
-           InetAddress.getByName(chosenNode.getHost()).getCanonicalHostName();
-    String tailUrl = "http://" + fqdn + ":" +
-                     chosenNode.getInfoPort() + 
-                 "/tail.jsp?filename=" + URLEncoder.encode(filename, "UTF-8") +
-                 "&namenodeInfoPort=" + namenodeInfoPort +
-                 "&chunkSizeToView=" + chunkSizeToView +
-                 "&referrer=" + 
-          URLEncoder.encode(req.getRequestURL() + "?" + req.getQueryString(),
-                            "UTF-8");
-    out.print("<a href=\"" + tailUrl + "\">Tail this file</a><br>");
-
-    out.print("<form action=\"/browseBlock.jsp\" method=GET>");
-    out.print("<b>Chunk size to view (in bytes, up to file's DFS block size): </b>");
-    out.print("<input type=\"hidden\" name=\"blockId\" value=\"" + blockId +
-              "\">");
-    out.print("<input type=\"hidden\" name=\"blockSize\" value=\"" + 
-              blockSize + "\">");
-    out.print("<input type=\"hidden\" name=\"startOffset\" value=\"" + 
-              startOffset + "\">");
-    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename +
-              "\">");
-    out.print("<input type=\"hidden\" name=\"datanodePort\" value=\"" + 
-              datanodePort+ "\">");
-    out.print("<input type=\"hidden\" name=\"namenodeInfoPort\" value=\"" +
-              namenodeInfoPort + "\">");
-    out.print("<input type=\"text\" name=\"chunkSizeToView\" value=" +
-              chunkSizeToView + " size=10 maxlength=10>");
-    out.print("&nbsp;&nbsp;<input type=\"submit\" name=\"submit\" value=\"Refresh\">");
-    out.print("</form>");
-    out.print("<hr>"); 
-    out.print("<a name=\"blockDetails\"></a>");
-    out.print("<B>Total number of blocks: "+blocks.size()+"</B><br>");
-    //generate a table and dump the info
-    out.println("\n<table>");
-    for (LocatedBlock cur : blocks) {
-      out.print("<tr>");
-      final String blockidstring = Long.toString(cur.getBlock().getBlockId());
-      blockSize = cur.getBlock().getNumBytes();
-      out.print("<td>"+blockidstring+":</td>");
-      DatanodeInfo[] locs = cur.getLocations();
-      for(int j=0; j<locs.length; j++) {
-        String datanodeAddr = locs[j].getName();
-        datanodePort = Integer.parseInt(datanodeAddr.substring(
-                                        datanodeAddr.indexOf(':') + 1, 
-                                    datanodeAddr.length())); 
-        fqdn = InetAddress.getByName(locs[j].getHost()).getCanonicalHostName();
-        String blockUrl = "http://"+ fqdn + ":" +
-                        locs[j].getInfoPort() +
-                        "/browseBlock.jsp?blockId=" + blockidstring +
-                        "&blockSize=" + blockSize +
-               "&filename=" + URLEncoder.encode(filename, "UTF-8")+ 
-                        "&datanodePort=" + datanodePort + 
-                        "&genstamp=" + cur.getBlock().getGenerationStamp() + 
-                        "&namenodeInfoPort=" + namenodeInfoPort +
-                        "&chunkSizeToView=" + chunkSizeToView;
-        out.print("<td>&nbsp</td>" 
-          + "<td><a href=\"" + blockUrl + "\">" + datanodeAddr + "</a></td>");
-      }
-      out.println("</tr>");
-    }
-    out.println("</table>");
-    out.print("<hr>");
-    String namenodeHost = datanode.getNameNodeAddr().getHostName();
-    out.print("<br><a href=\"http://" + 
-              InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":" +
-              namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
-    dfs.close();
-  }
-
-  public void generateFileChunks(JspWriter out, HttpServletRequest req) 
-    throws IOException {
-    long startOffset = 0;
-    int datanodePort = 0; 
-
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-
-    final String filename = JspHelper.validatePath(
-        req.getParameter("filename"));
-    if (filename == null) {
-      out.print("Invalid input (filename absent)");
-      return;
-    }
-    
-    final Long blockId = JspHelper.validateLong(req.getParameter("blockId"));
-    if (blockId == null) {
-      out.print("Invalid input (blockId absent)");
-      return;
-    }
-
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    
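-    // When access tokens are enabled, look up the token for this block; otherwise the dummy token suffices.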
-    AccessToken accessToken = AccessToken.DUMMY_TOKEN;
-    if (JspHelper.conf
-        .getBoolean(AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false)) {
-      List<LocatedBlock> blks = dfs.namenode.getBlockLocations(filename, 0,
-          Long.MAX_VALUE).getLocatedBlocks();
-      if (blks == null || blks.size() == 0) {
-        out.print("Can't locate file blocks");
-        dfs.close();
-        return;
-      }
-      for (int i = 0; i < blks.size(); i++) {
-        if (blks.get(i).getBlock().getBlockId() == blockId) {
-          accessToken = blks.get(i).getAccessToken();
-          break;
-        }
-      }
-    }
-    
-    final Long genStamp = JspHelper.validateLong(req.getParameter("genstamp"));
-    if (genStamp == null) {
-      out.print("Invalid input (genstamp absent)");
-      return;
-    }
-
-    String blockSizeStr;
-    long blockSize = 0;
-    blockSizeStr = req.getParameter("blockSize"); 
-    if (blockSizeStr == null) {
-      out.print("Invalid input (blockSize absent)");
-      return;
-    }
-    blockSize = Long.parseLong(blockSizeStr);
-    
-    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(req.getParameter("chunkSizeToView"));
-
-    String startOffsetStr = req.getParameter("startOffset");
-    if (startOffsetStr == null || Long.parseLong(startOffsetStr) < 0)
-      startOffset = 0;
-    else startOffset = Long.parseLong(startOffsetStr);
-
-    String datanodePortStr = req.getParameter("datanodePort");
-    if (datanodePortStr == null) {
-      out.print("Invalid input (datanodePort absent)");
-      return;
-    }
-    datanodePort = Integer.parseInt(datanodePortStr);
-    out.print("<h3>File: ");
-    JspHelper.printPathWithLinks(filename, out, namenodeInfoPort);
-    out.print("</h3><hr>");
-    String parent = new File(filename).getParent();
-    JspHelper.printGotoForm(out, namenodeInfoPort, parent);
-    out.print("<hr>");
-    out.print("<a href=\"http://" + req.getServerName() + ":" + 
-              req.getServerPort() + 
-              "/browseDirectory.jsp?dir=" + 
-              URLEncoder.encode(parent, "UTF-8") +
-              "&namenodeInfoPort=" + namenodeInfoPort + 
-              "\"><i>Go back to dir listing</i></a><br>");
-    out.print("<a href=\"#viewOptions\">Advanced view/download options</a><br>");
-    out.print("<hr>");
-
-    //Determine the prev & next blocks
-    long nextStartOffset = 0;
-    long nextBlockSize = 0;
-    String nextBlockIdStr = null;
-    String nextGenStamp = null;
-    String nextHost = req.getServerName();
-    int nextPort = req.getServerPort();
-    int nextDatanodePort = datanodePort;
-    //determine data for the next link
-    if (startOffset + chunkSizeToView >= blockSize) {
-      //we have to go to the next block from this point onwards
-      List<LocatedBlock> blocks = 
-        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-      for (int i = 0; i < blocks.size(); i++) {
-        if (blocks.get(i).getBlock().getBlockId() == blockId) {
-          if (i != blocks.size() - 1) {
-            LocatedBlock nextBlock = blocks.get(i+1);
-            nextBlockIdStr = Long.toString(nextBlock.getBlock().getBlockId());
-            nextGenStamp = Long.toString(nextBlock.getBlock().getGenerationStamp());
-            nextStartOffset = 0;
-            nextBlockSize = nextBlock.getBlock().getNumBytes();
-            DatanodeInfo d = JspHelper.bestNode(nextBlock);
-            String datanodeAddr = d.getName();
-            nextDatanodePort = Integer.parseInt(
-                                      datanodeAddr.substring(
-                                           datanodeAddr.indexOf(':') + 1, 
-                                      datanodeAddr.length())); 
-            nextHost = InetAddress.getByName(d.getHost()).getCanonicalHostName();
-            nextPort = d.getInfoPort(); 
-          }
-        }
-      }
-    } 
-    else {
-      //we are in the same block
-      nextBlockIdStr = blockId.toString();
-      nextStartOffset = startOffset + chunkSizeToView;
-      nextBlockSize = blockSize;
-      nextGenStamp = genStamp.toString();
-    }
-    String nextUrl = null;
-    if (nextBlockIdStr != null) {
-      nextUrl = "http://" + nextHost + ":" + 
-                nextPort + 
-                "/browseBlock.jsp?blockId=" + nextBlockIdStr +
-                "&blockSize=" + nextBlockSize + "&startOffset=" + 
-                nextStartOffset + 
-                "&genstamp=" + nextGenStamp +
-                "&filename=" + URLEncoder.encode(filename, "UTF-8") +
-                "&chunkSizeToView=" + chunkSizeToView + 
-                "&datanodePort=" + nextDatanodePort +
-                "&namenodeInfoPort=" + namenodeInfoPort;
-      out.print("<a href=\"" + nextUrl + "\">View Next chunk</a>&nbsp;&nbsp;");        
-    }
-    //determine data for the prev link
-    String prevBlockIdStr = null;
-    String prevGenStamp = null;
-    long prevStartOffset = 0;
-    long prevBlockSize = 0;
-    String prevHost = req.getServerName();
-    int prevPort = req.getServerPort();
-    int prevDatanodePort = datanodePort;
-    if (startOffset == 0) {
-      List<LocatedBlock> blocks = 
-        dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-      for (int i = 0; i < blocks.size(); i++) {
-        if (blocks.get(i).getBlock().getBlockId() == blockId) {
-          if (i != 0) {
-            LocatedBlock prevBlock = blocks.get(i-1);
-            prevBlockIdStr = Long.toString(prevBlock.getBlock().getBlockId());
-            prevGenStamp = Long.toString(prevBlock.getBlock().getGenerationStamp());
-            prevStartOffset = prevBlock.getBlock().getNumBytes() - chunkSizeToView;
-            if (prevStartOffset < 0)
-              prevStartOffset = 0;
-            prevBlockSize = prevBlock.getBlock().getNumBytes();
-            DatanodeInfo d = JspHelper.bestNode(prevBlock);
-            String datanodeAddr = d.getName();
-            prevDatanodePort = Integer.parseInt(
-                                      datanodeAddr.substring(
-                                          datanodeAddr.indexOf(':') + 1, 
-                                      datanodeAddr.length())); 
-            prevHost = InetAddress.getByName(d.getHost()).getCanonicalHostName();
-            prevPort = d.getInfoPort();
-          }
-        }
-      }
-    }
-    else {
-      //we are in the same block
-      prevBlockIdStr = blockId.toString();
-      prevStartOffset = startOffset - chunkSizeToView;
-      if (prevStartOffset < 0) prevStartOffset = 0;
-      prevBlockSize = blockSize;
-      prevGenStamp = genStamp.toString();
-    }
-
-    String prevUrl = null;
-    if (prevBlockIdStr != null) {
-      prevUrl = "http://" + prevHost + ":" + 
-                prevPort + 
-                "/browseBlock.jsp?blockId=" + prevBlockIdStr + 
-                "&blockSize=" + prevBlockSize + "&startOffset=" + 
-                prevStartOffset + 
-                "&filename=" + URLEncoder.encode(filename, "UTF-8") + 
-                "&chunkSizeToView=" + chunkSizeToView +
-                "&genstamp=" + prevGenStamp +
-                "&datanodePort=" + prevDatanodePort +
-                "&namenodeInfoPort=" + namenodeInfoPort;
-      out.print("<a href=\"" + prevUrl + "\">View Prev chunk</a>&nbsp;&nbsp;");
-    }
-    out.print("<hr>");
-    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
-    try {
-      JspHelper.streamBlockInAscii(
-          new InetSocketAddress(req.getServerName(), datanodePort), blockId,
-          accessToken, genStamp, blockSize, startOffset, chunkSizeToView, out);
-    } catch (Exception e) {
-      out.print(e);
-    }
-    out.print("</textarea>");
-    dfs.close();
-  }
-
-%>
-<html>
-<head>
-<%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
-</head>
-<body onload="document.goto.dir.focus()">
-<% 
-   generateFileChunks(out,request);
-%>
-<hr>
-<% 
-   generateFileDetails(out,request);
-%>
-
-<h2>Local logs</h2>
-<a href="/logs/">Log</a> directory
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 192
src/webapps/datanode/browseDirectory.jsp

@@ -1,192 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.net.*"
-
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.util.*"
-%>
-<%!
-  static final DataNode datanode = DataNode.getDataNode();
-  
-  public void generateDirectoryStructure( JspWriter out, 
-                                          HttpServletRequest req,
-                                          HttpServletResponse resp) 
-    throws IOException {
-    final String dir = JspHelper.validatePath(req.getParameter("dir"));
-    if (dir == null) {
-      out.print("Invalid input");
-      return;
-    }
-    
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-    
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    String target = dir;
-    final FileStatus targetStatus = dfs.getFileInfo(target);
-    if (targetStatus == null) { // not exists
-      out.print("<h3>File or directory : " + target + " does not exist</h3>");
-      JspHelper.printGotoForm(out, namenodeInfoPort, target);
-    }
-    else {
-      if( !targetStatus.isDir() ) { // a file
-        List<LocatedBlock> blocks = 
-          dfs.namenode.getBlockLocations(dir, 0, 1).getLocatedBlocks();
-	      
-        LocatedBlock firstBlock = null;
-        DatanodeInfo [] locations = null;
-        if (blocks.size() > 0) {
-          firstBlock = blocks.get(0);
-          locations = firstBlock.getLocations();
-        }
-        if (locations == null || locations.length == 0) {
-          out.print("Empty file");
-        } else {
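-          // Redirect the browser to the best datanode holding the file's first block.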
-          DatanodeInfo chosenNode = JspHelper.bestNode(firstBlock);
-          String fqdn = InetAddress.getByName(chosenNode.getHost()).
-            getCanonicalHostName();
-          String datanodeAddr = chosenNode.getName();
-          int datanodePort = Integer.parseInt(
-                                              datanodeAddr.substring(
-                                                                     datanodeAddr.indexOf(':') + 1, 
-                                                                     datanodeAddr.length())); 
-          String redirectLocation = "http://"+fqdn+":" +
-            chosenNode.getInfoPort() + 
-            "/browseBlock.jsp?blockId=" +
-            firstBlock.getBlock().getBlockId() +
-            "&blockSize=" + firstBlock.getBlock().getNumBytes() +
-            "&genstamp=" + firstBlock.getBlock().getGenerationStamp() +
-            "&filename=" + URLEncoder.encode(dir, "UTF-8") + 
-            "&datanodePort=" + datanodePort + 
-            "&namenodeInfoPort=" + namenodeInfoPort;
-          resp.sendRedirect(redirectLocation);
-        }
-        return;
-      }
-      // directory
-      FileStatus[] files = dfs.listPaths(target);
-      //generate a table and dump the info
-      String [] headings = { "Name", "Type", "Size", "Replication", 
-                              "Block Size", "Modification Time",
-                              "Permission", "Owner", "Group" };
-      out.print("<h3>Contents of directory ");
-      JspHelper.printPathWithLinks(dir, out, namenodeInfoPort);
-      out.print("</h3><hr>");
-      JspHelper.printGotoForm(out, namenodeInfoPort, dir);
-      out.print("<hr>");
-	
-      File f = new File(dir);
-      String parent;
-      if ((parent = f.getParent()) != null)
-        out.print("<a href=\"" + req.getRequestURL() + "?dir=" + parent +
-                  "&namenodeInfoPort=" + namenodeInfoPort +
-                  "\">Go to parent directory</a><br>");
-	
-      if (files == null || files.length == 0) {
-        out.print("Empty directory");
-      }
-      else {
-        JspHelper.addTableHeader(out);
-        int row=0;
-        JspHelper.addTableRow(out, headings, row++);
-        String cols [] = new String[headings.length];
-        for (int i = 0; i < files.length; i++) {
-          //Get the location of the first block of the file
-          if (files[i].getPath().toString().endsWith(".crc")) continue;
-          if (!files[i].isDir()) {
-            cols[1] = "file";
-            cols[2] = StringUtils.byteDesc(files[i].getLen());
-            cols[3] = Short.toString(files[i].getReplication());
-            cols[4] = StringUtils.byteDesc(files[i].getBlockSize());
-          }
-          else {
-            cols[1] = "dir";
-            cols[2] = "";
-            cols[3] = "";
-            cols[4] = "";
-          }
-          String datanodeUrl = req.getRequestURL()+"?dir="+
-              URLEncoder.encode(files[i].getPath().toString(), "UTF-8") + 
-              "&namenodeInfoPort=" + namenodeInfoPort;
-          cols[0] = "<a href=\""+datanodeUrl+"\">"+files[i].getPath().getName()+"</a>";
-          cols[5] = FsShell.dateForm.format(new Date((files[i].getModificationTime())));
-          cols[6] = files[i].getPermission().toString();
-          cols[7] = files[i].getOwner();
-          cols[8] = files[i].getGroup();
-          JspHelper.addTableRow(out, cols, row++);
-        }
-        JspHelper.addTableFooter(out);
-      }
-    } 
-    String namenodeHost = datanode.getNameNodeAddr().getHostName();
-    out.print("<br><a href=\"http://" + 
-              InetAddress.getByName(namenodeHost).getCanonicalHostName() + ":" +
-              namenodeInfoPort + "/dfshealth.jsp\">Go back to DFS home</a>");
-    dfs.close();
-  }
-
-%>
-
-<html>
-<head>
-<style type=text/css>
-<!--
-body 
-  {
-  font-family: sans-serif;
-  }
--->
-</style>
-<%JspHelper.createTitle(out, request, request.getParameter("dir")); %>
-</head>
-
-<body onload="document.goto.dir.focus()">
-<% 
-  try {
-    generateDirectoryStructure(out,request,response);
-  }
-  catch(IOException ioe) {
-    String msg = ioe.getLocalizedMessage();
-    int i = msg.indexOf("\n");
-    if (i >= 0) {
-      msg = msg.substring(0, i);
-    }
-    out.print("<h3>" + msg + "</h3>");
-  }
-%>
-<hr>
-
-<h2>Local logs</h2>
-<a href="/logs/">Log</a> directory
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 135
src/webapps/datanode/tail.jsp

@@ -1,135 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.net.*"
-
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.security.AccessToken"
-  import="org.apache.hadoop.util.*"
-  import="org.apache.hadoop.net.NetUtils"
-%>
-
-<%!
-  static final DataNode datanode = DataNode.getDataNode();
-
-  public void generateFileChunks(JspWriter out, HttpServletRequest req) 
-    throws IOException {
-    final String referrer = JspHelper.validateURL(req.getParameter("referrer"));
-    boolean noLink = false;
-    if (referrer == null) {
-      noLink = true;
-    }
-
-    final String filename = JspHelper.validatePath(
-        req.getParameter("filename"));
-    if (filename == null) {
-      out.print("Invalid input (file name absent)");
-      return;
-    }
-
-    String namenodeInfoPortStr = req.getParameter("namenodeInfoPort");
-    int namenodeInfoPort = -1;
-    if (namenodeInfoPortStr != null)
-      namenodeInfoPort = Integer.parseInt(namenodeInfoPortStr);
-    
-    final int chunkSizeToView = JspHelper.string2ChunkSizeToView(req.getParameter("chunkSizeToView"));
-
-    if (!noLink) {
-      out.print("<h3>Tail of File: ");
-      JspHelper.printPathWithLinks(filename, out, namenodeInfoPort);
-      out.print("</h3><hr>");
-      out.print("<a href=\"" + referrer + "\">Go Back to File View</a><hr>");
-    }
-    else {
-      out.print("<h3>" + filename + "</h3>");
-    }
-    out.print("<b>Chunk size to view (in bytes, up to file's DFS block size): </b>");
-    out.print("<input type=\"text\" name=\"chunkSizeToView\" value=" +
-              chunkSizeToView + " size=10 maxlength=10>");
-    out.print("&nbsp;&nbsp;<input type=\"submit\" name=\"submit\" value=\"Refresh\"><hr>");
-    out.print("<input type=\"hidden\" name=\"filename\" value=\"" + filename +
-              "\">");
-    out.print("<input type=\"hidden\" name=\"namenodeInfoPort\" value=\"" + namenodeInfoPort +
-    "\">");
-    if (!noLink)
-      out.print("<input type=\"hidden\" name=\"referrer\" value=\"" + 
-                referrer+ "\">");
-
-    //fetch the block from the datanode that has the last block for this file
-    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(), JspHelper.conf);
-    List<LocatedBlock> blocks = 
-      dfs.namenode.getBlockLocations(filename, 0, Long.MAX_VALUE).getLocatedBlocks();
-    if (blocks == null || blocks.size() == 0) {
-      out.print("No datanodes contain blocks of file "+filename);
-      dfs.close();
-      return;
-    }
-    LocatedBlock lastBlk = blocks.get(blocks.size() - 1);
-    long blockSize = lastBlk.getBlock().getNumBytes();
-    long blockId = lastBlk.getBlock().getBlockId();
-    AccessToken accessToken = lastBlk.getAccessToken();
-    long genStamp = lastBlk.getBlock().getGenerationStamp();
-    DatanodeInfo chosenNode;
-    try {
-      chosenNode = JspHelper.bestNode(lastBlk);
-    } catch (IOException e) {
-      out.print(e.toString());
-      dfs.close();
-      return;
-    }      
-    InetSocketAddress addr = NetUtils.createSocketAddr(chosenNode.getName());
-    //view the last chunkSizeToView bytes while Tailing
-    final long startOffset = blockSize >= chunkSizeToView? blockSize - chunkSizeToView: 0;
-
-    out.print("<textarea cols=\"100\" rows=\"25\" wrap=\"virtual\" style=\"width:100%\" READONLY>");
-    JspHelper.streamBlockInAscii(addr, blockId, accessToken, genStamp, blockSize, startOffset, chunkSizeToView, out);
-    out.print("</textarea>");
-    dfs.close();
-  }
-
-%>
-
-
-
-<html>
-<head>
-<%JspHelper.createTitle(out, request, request.getParameter("filename")); %>
-</head>
-<body>
-<form action="/tail.jsp" method="GET">
-<% 
-   generateFileChunks(out,request);
-%>
-</form>
-<hr>
-
-<h2>Local logs</h2>
-<a href="/logs/">Log</a> directory
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 280
src/webapps/hdfs/dfshealth.jsp

@@ -1,280 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.server.datanode.*"
-  import="org.apache.hadoop.hdfs.server.common.Storage"
-  import="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.DateFormat"
-  import="java.lang.Math"
-  import="java.net.URLEncoder"
-%>
-<%!
-  int rowNum = 0;
-  int colNum = 0;
-
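-  // Helpers that emit alternately styled table rows; counterReset() restarts the striping for each new table.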
-  String rowTxt() { colNum = 0;
-      return "<tr class=\"" + (((rowNum++)%2 == 0)? "rowNormal" : "rowAlt")
-          + "\"> "; }
-  String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; }
-  void counterReset () { colNum = 0; rowNum = 0 ; }
-
-  long diskBytes = 1024 * 1024 * 1024;
-  String diskByteStr = "GB";
-
-  String sorterField = null;
-  String sorterOrder = null;
-
-  String NodeHeaderStr(String name) {
-      String ret = "class=header";
-      String order = "ASC";
-      if ( name.equals( sorterField ) ) {
-          ret += sorterOrder;
-          if ( sorterOrder.equals("ASC") )
-              order = "DSC";
-      }
-      ret += " onClick=\"window.document.location=" +
-          "'/dfshealth.jsp?sorter/field=" + name + "&sorter/order=" +
-          order + "'\" title=\"sort on this column\"";
-      
-      return ret;
-  }
-      
-  public void generateNodeData( JspWriter out, DatanodeDescriptor d,
-                                    String suffix, boolean alive,
-                                    int nnHttpPort )
-    throws IOException {
-      
-    /* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5
-       we use:
-       1) d.getHostName():d.getPort() to display.
-           Domain and port are stripped if they are common across the nodes.
-           i.e. "dn1"
-       2) d.getHost():d.getPort() for "title".
-          i.e. "192.168.0.5:50010"
-       3) d.getHostName():d.getInfoPort() for url.
-          i.e. "http://dn1.hadoop.apache.org:50075/..."
-          Note that "d.getHost():d.getPort()" is what DFS clients use
-          to interact with datanodes.
-    */
-    // from nn_browsedfscontent.jsp:
-    String url = "http://" + d.getHostName() + ":" + d.getInfoPort() +
-                 "/browseDirectory.jsp?namenodeInfoPort=" +
-                 nnHttpPort + "&dir=" +
-                 URLEncoder.encode("/", "UTF-8");
-     
-    String name = d.getHostName() + ":" + d.getPort();
-    if ( !name.matches( "\\d+\\.\\d+\\.\\d+\\.\\d+.*" ) )
-        name = name.replaceAll( "\\.[^.:]*", "" );    
-    int idx = (suffix != null && name.endsWith( suffix )) ?
-        name.indexOf( suffix ) : -1;
-    
-    out.print( rowTxt() + "<td class=\"name\"><a title=\""
-               + d.getHost() + ":" + d.getPort() +
-               "\" href=\"" + url + "\">" +
-               (( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" +
-               (( alive ) ? "" : "\n") );
-    if ( !alive )
-        return;
-    
-    long c = d.getCapacity();
-    long u = d.getDfsUsed();
-    long nu = d.getNonDfsUsed();
-    long r = d.getRemaining();
-    String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());    
-    String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent());    
-    
-    String adminState = (d.isDecommissioned() ? "Decommissioned" :
-                         (d.isDecommissionInProgress() ? "Decommission In Progress":
-                          "In Service"));
-    
-    long timestamp = d.getLastUpdate();
-    long currentTime = System.currentTimeMillis();
-    out.print("<td class=\"lastcontact\"> " +
-              ((currentTime - timestamp)/1000) +
-              "<td class=\"adminstate\">" +
-              adminState +
-              "<td align=\"right\" class=\"capacity\">" +
-              StringUtils.limitDecimalTo2(c*1.0/diskBytes) +
-              "<td align=\"right\" class=\"used\">" +
-              StringUtils.limitDecimalTo2(u*1.0/diskBytes) +      
-              "<td align=\"right\" class=\"nondfsused\">" +
-              StringUtils.limitDecimalTo2(nu*1.0/diskBytes) +      
-              "<td align=\"right\" class=\"remaining\">" +
-              StringUtils.limitDecimalTo2(r*1.0/diskBytes) +      
-              "<td align=\"right\" class=\"pcused\">" + percentUsed +
-              "<td class=\"pcused\">" +
-              ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed) , 100) +
-              "<td align=\"right\" class=\"pcremaining`\">" + percentRemaining +
-              "<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() + 
-              "\" class=\"blocks\">" + d.numBlocks() + "\n");
-  }
-  
-  
-  public void generateConfReport( JspWriter out,
-		  NameNode nn,
-		  HttpServletRequest request)
-  throws IOException {
-	  FSNamesystem fsn = nn.getNamesystem();
-	  FSImage fsImage = fsn.getFSImage();
-	  List<Storage.StorageDirectory> removedStorageDirs = fsImage.getRemovedStorageDirs();
-
-	  //FS Image storage configuration
-	  out.print("<h3> " + nn.getRole() + " Storage: </h3>");
-	  out.print("<div id=\"dfstable\"> <table border=1 cellpadding=10 cellspacing=0 title=\"NameNode Storage\">\n"+
-	  "<thead><tr><td><b>Storage Directory</b></td><td><b>Type</b></td><td><b>State</b></td></tr></thead>");
-	  
-	  StorageDirectory st = null;
-	  for (Iterator<StorageDirectory> it = fsImage.dirIterator(); it.hasNext();) {
-	      st = it.next();
-	      String dir = "" + st.getRoot();
-	      String type = "" + st.getStorageDirType();
-	      out.print("<tr><td>"+dir+"</td><td>"+type+"</td><td>Active</td></tr>");
-	  }
-	  
-	  int storageDirsSize = removedStorageDirs.size();
-	  for (int i = 0; i < storageDirsSize; i++) {
-	      st = removedStorageDirs.get(i);
-	      String dir = "" + st.getRoot();
-	      String type = "" + st.getStorageDirType();
-	      out.print("<tr><td>"+dir+"</td><td>"+type+"</td><td><font color=red>Failed</font></td></tr>");
-	  }
-	  
-	  out.print("</table></div><br>\n");
-  }
-
-
-  public void generateDFSHealthReport(JspWriter out,
-                                      NameNode nn,
-                                      HttpServletRequest request)
-                                      throws IOException {
-    FSNamesystem fsn = nn.getNamesystem();
-    ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();
-    ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-    fsn.DFSNodesStatus(live, dead);
-
-    sorterField = request.getParameter("sorter/field");
-    sorterOrder = request.getParameter("sorter/order");
-    if ( sorterField == null )
-        sorterField = "name";
-    if ( sorterOrder == null )
-        sorterOrder = "ASC";
-
-    // Find out common suffix. Should this be before or after the sort?
-    String port_suffix = null;
-    if ( live.size() > 0 ) {
-        String name = live.get(0).getName();
-        int idx = name.indexOf(':');
-        if ( idx > 0 ) {
-            port_suffix = name.substring( idx );
-        }
-        
-        for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
-            if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
-                port_suffix = null;
-                break;
-            }
-        }
-    }
-        
-    counterReset();
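-    // fsn.getStats(): [0] = configured capacity, [1] = DFS used, [2] = remaining; non-DFS use is whatever is left, clamped at zero.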
-    long[] fsnStats = fsn.getStats(); 
-    long total = fsnStats[0];
-    long remaining = fsnStats[2];
-    long used = fsnStats[1];
-    long nonDFS = total - remaining - used;
-    nonDFS = nonDFS < 0 ? 0 : nonDFS;
-    float percentUsed = total <= 0 
-        ? 0f : ((float)used * 100.0f)/(float)total;
-    float percentRemaining = total <= 0 
-        ? 100f : ((float)remaining * 100.0f)/(float)total;
-
-    out.print( "<div id=\"dfstable\"> <table>\n" +
-	       rowTxt() + colTxt() + "Configured Capacity" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( total ) +
-	       rowTxt() + colTxt() + "DFS Used" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( used ) +
-	       rowTxt() + colTxt() + "Non DFS Used" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( nonDFS ) +
-	       rowTxt() + colTxt() + "DFS Remaining" + colTxt() + ":" + colTxt() +
-	       StringUtils.byteDesc( remaining ) +
-	       rowTxt() + colTxt() + "DFS Used%" + colTxt() + ":" + colTxt() +
-	       StringUtils.limitDecimalTo2(percentUsed) + " %" +
-	       rowTxt() + colTxt() + "DFS Remaining%" + colTxt() + ":" + colTxt() +
-	       StringUtils.limitDecimalTo2(percentRemaining) + " %" +
-	       rowTxt() + colTxt() +
-	       		"<a href=\"dfsnodelist.jsp?whatNodes=LIVE\">Live Nodes</a> " +
-	       		colTxt() + ":" + colTxt() + live.size() +
-	       rowTxt() + colTxt() +
-	       		"<a href=\"dfsnodelist.jsp?whatNodes=DEAD\">Dead Nodes</a> " +
-	       		colTxt() + ":" + colTxt() + dead.size() +
-               "</table></div><br>\n" );
-    
-    if (live.isEmpty() && dead.isEmpty()) {
-        out.print("There are no datanodes in the cluster");
-    }
-  }%>
-
-<%
-  NameNode nn = (NameNode)application.getAttribute("name.node");
-  FSNamesystem fsn = nn.getNamesystem();
-  String namenodeRole = nn.getRole().toString();
-  String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
-%>
-
-<html>
-
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
-    
-<body>
-<h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
-<%= JspHelper.getVersionTable(fsn) %>
-<br />
-<b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
-<b><a href="/logs/"><%=namenodeRole%> Logs</a></b>
-
-<hr>
-<h3>Cluster Summary</h3>
-<b> <%= JspHelper.getSafeModeText(fsn)%> </b>
-<b> <%= JspHelper.getInodeLimitText(fsn)%> </b>
-<a class="warning"> <%= JspHelper.getWarningText(fsn)%></a>
-
-<%
-    generateDFSHealthReport(out, nn, request); 
-%>
-<hr>
-<%
-	generateConfReport(out, nn, request);
-%>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 276
src/webapps/hdfs/dfsnodelist.jsp

@@ -1,276 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-contentType="text/html; charset=UTF-8"
-	import="javax.servlet.*"
-	import="javax.servlet.http.*"
-	import="java.io.*"
-	import="java.util.*"
-	import="org.apache.hadoop.fs.*"
-	import="org.apache.hadoop.hdfs.*"
-	import="org.apache.hadoop.hdfs.server.common.*"
-	import="org.apache.hadoop.hdfs.server.namenode.*"
-	import="org.apache.hadoop.hdfs.server.datanode.*"
-	import="org.apache.hadoop.hdfs.protocol.*"
-	import="org.apache.hadoop.util.*"
-	import="java.text.DateFormat"
-	import="java.lang.Math"
-	import="java.net.URLEncoder"
-%>
-<%!
-	int rowNum = 0;
-	int colNum = 0;
-
-	String rowTxt() { colNum = 0;
-	return "<tr class=\"" + (((rowNum++)%2 == 0)? "rowNormal" : "rowAlt")
-	+ "\"> "; }
-	String colTxt() { return "<td id=\"col" + ++colNum + "\"> "; }
-	void counterReset () { colNum = 0; rowNum = 0 ; }
-
-	long diskBytes = 1024 * 1024 * 1024;
-	String diskByteStr = "GB";
-
-	String sorterField = null;
-	String sorterOrder = null;
-	String whatNodes = "LIVE";
-
-String NodeHeaderStr(String name) {
-	String ret = "class=header";
-	String order = "ASC";
-	if ( name.equals( sorterField ) ) {
-		ret += sorterOrder;
-		if ( sorterOrder.equals("ASC") )
-			order = "DSC";
-	}
-	ret += " onClick=\"window.document.location=" +
-	"'/dfsnodelist.jsp?whatNodes="+whatNodes+"&sorter/field=" + name + "&sorter/order=" +
-	order + "'\" title=\"sort on this column\"";
-
-	return ret;
-}
-
-public void generateNodeData( JspWriter out, DatanodeDescriptor d,
-		String suffix, boolean alive,
-		int nnHttpPort )
-throws IOException {
-
-	/* Say the datanode is dn1.hadoop.apache.org with ip 192.168.0.5
-	   we use:
-	   1) d.getHostName():d.getPort() to display.
-	      Domain and port are stripped if they are common across the nodes.
-	      i.e. "dn1"
-	   2) d.getHost():d.getPort() for "title".
-	      i.e. "192.168.0.5:50010"
-	   3) d.getHostName():d.getInfoPort() for url.
-	      i.e. "http://dn1.hadoop.apache.org:50075/..."
-	      Note that "d.getHost():d.getPort()" is what DFS clients use
-	      to interact with datanodes.
-	 */
-	// from nn_browsedfscontent.jsp:
-	String url = "http://" + d.getHostName() + ":" + d.getInfoPort() +
-	"/browseDirectory.jsp?namenodeInfoPort=" +
-	nnHttpPort + "&dir=" +
-	URLEncoder.encode("/", "UTF-8");
-
-	String name = d.getHostName() + ":" + d.getPort();
-	if ( !name.matches( "\\d+\\.\\d+.\\d+\\.\\d+.*" ) ) 
-	if ( !name.matches( "\\d+\\.\\d+\\.\\d+\\.\\d+.*" ) )
-	int idx = (suffix != null && name.endsWith( suffix )) ?
-			name.indexOf( suffix ) : -1;
-
-	out.print( rowTxt() + "<td class=\"name\"><a title=\""
-		+ d.getHost() + ":" + d.getPort() +
-		"\" href=\"" + url + "\">" +
-		(( idx > 0 ) ? name.substring(0, idx) : name) + "</a>" +
-		(( alive ) ? "" : "\n") );
-	if ( !alive )
-		return;
-
-	long c = d.getCapacity();
-	long u = d.getDfsUsed();
-	long nu = d.getNonDfsUsed();
-	long r = d.getRemaining();
-	String percentUsed = StringUtils.limitDecimalTo2(d.getDfsUsedPercent());
-	String percentRemaining = StringUtils.limitDecimalTo2(d.getRemainingPercent());
-
-	String adminState = (d.isDecommissioned() ? "Decommissioned" :
-		(d.isDecommissionInProgress() ? "Decommission In Progress" :
-		"In Service"));
-
-	long timestamp = d.getLastUpdate();
-	long currentTime = System.currentTimeMillis();
-	out.print("<td class=\"lastcontact\"> " +
-		((currentTime - timestamp)/1000) +
-		"<td class=\"adminstate\">" +
-		adminState +
-		"<td align=\"right\" class=\"capacity\">" +
-		StringUtils.limitDecimalTo2(c*1.0/diskBytes) +
-		"<td align=\"right\" class=\"used\">" +
-		StringUtils.limitDecimalTo2(u*1.0/diskBytes) +
-		"<td align=\"right\" class=\"nondfsused\">" +
-		StringUtils.limitDecimalTo2(nu*1.0/diskBytes) +
-		"<td align=\"right\" class=\"remaining\">" +
-		StringUtils.limitDecimalTo2(r*1.0/diskBytes) +
-		"<td align=\"right\" class=\"pcused\">" + percentUsed +
-		"<td class=\"pcused\">" +
-		ServletUtil.percentageGraph( (int)Double.parseDouble(percentUsed), 100) +
-		"<td align=\"right\" class=\"pcremaining\">" + percentRemaining +
-		"<td title=" + "\"blocks scheduled : " + d.getBlocksScheduled() +
-		"\" class=\"blocks\">" + d.numBlocks() + "\n");
-}
-
-
-
-public void generateDFSNodesList(JspWriter out, 
-		NameNode nn,
-		HttpServletRequest request)
-throws IOException {
-	ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();    
-	ArrayList<DatanodeDescriptor> dead = new ArrayList<DatanodeDescriptor>();
-	nn.getNamesystem().DFSNodesStatus(live, dead);
-
-	whatNodes = request.getParameter("whatNodes"); // show only live or only dead nodes
-	sorterField = request.getParameter("sorter/field");
-	sorterOrder = request.getParameter("sorter/order");
-	if ( sorterField == null )
-		sorterField = "name";
-	if ( sorterOrder == null )
-		sorterOrder = "ASC";
-
-	JspHelper.sortNodeList(live, sorterField, sorterOrder);
-	JspHelper.sortNodeList(dead, "name", "ASC");
-
-	// Find out common suffix. Should this be before or after the sort?
-	String port_suffix = null;
-	if ( live.size() > 0 ) {
-		String name = live.get(0).getName();
-		int idx = name.indexOf(':');
-		if ( idx > 0 ) {
-			port_suffix = name.substring( idx );
-		}
-
-		for ( int i=1; port_suffix != null && i < live.size(); i++ ) {
-			if ( live.get(i).getName().endsWith( port_suffix ) == false ) {
-				port_suffix = null;
-				break;
-			}
-		}
-	}
-
-	counterReset();
-
-	try {
-		Thread.sleep(1000);
-	} catch (InterruptedException e) {}
-
-	if (live.isEmpty() && dead.isEmpty()) {
-		out.print("There are no datanodes in the cluster");
-	}
-	else {
-
-		int nnHttpPort = nn.getHttpAddress().getPort();
-		out.print( "<div id=\"dfsnodetable\"> ");
-		if ("LIVE".equals(whatNodes)) {
-
-			out.print( 
-					"<a name=\"LiveNodes\" id=\"title\">" +
-					"Live Datanodes : " + live.size() + "</a>" +
-			"<br><br>\n<table border=1 cellspacing=0>\n" );
-
-			counterReset();
-
-			if ( live.size() > 0 ) {
-
-				if ( live.get(0).getCapacity() > 1024 * diskBytes ) {
-					diskBytes *= 1024;
-					diskByteStr = "TB";
-				}
-
-				out.print( "<tr class=\"headerRow\"> <th " +
-						NodeHeaderStr("name") + "> Node <th " +
-						NodeHeaderStr("lastcontact") + "> Last <br>Contact <th " +
-						NodeHeaderStr("adminstate") + "> Admin State <th " +
-						NodeHeaderStr("capacity") + "> Configured <br>Capacity (" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("used") + "> Used <br>(" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("nondfsused") + "> Non DFS <br>Used (" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("remaining") + "> Remaining <br>(" + 
-						diskByteStr + ") <th " + 
-						NodeHeaderStr("pcused") + "> Used <br>(%) <th " + 
-						NodeHeaderStr("pcused") + "> Used <br>(%) <th " +
-						NodeHeaderStr("pcremaining") + "> Remaining <br>(%) <th " +
-						NodeHeaderStr("blocks") + "> Blocks\n" );
-
-				JspHelper.sortNodeList(live, sorterField, sorterOrder);
-				for ( int i=0; i < live.size(); i++ ) {
-					generateNodeData(out, live.get(i), port_suffix, true, nnHttpPort);
-				}
-			}
-			out.print("</table>\n");
-		} else {
-
-			out.print("<br> <a name=\"DeadNodes\" id=\"title\"> " +
-					" Dead Datanodes : " +dead.size() + "</a><br><br>\n");
-
-			if ( dead.size() > 0 ) {
-				out.print( "<table border=1 cellspacing=0> <tr id=\"row1\"> " +
-				"<td> Node \n" );
-
-				JspHelper.sortNodeList(dead, "name", "ASC");
-				for ( int i=0; i < dead.size() ; i++ ) {
-					generateNodeData(out, dead.get(i), port_suffix, false, nnHttpPort);
-				}
-
-				out.print("</table>\n");
-			}
-		}
-		out.print("</div>");
-	}
-}%>
-
-<%
-NameNode nn = (NameNode)application.getAttribute("name.node");
-String namenodeRole = nn.getRole().toString();
-FSNamesystem fsn = nn.getNamesystem();
-String namenodeLabel = nn.getNameNodeAddress().getHostName() + ":" + nn.getNameNodeAddress().getPort();
-%>
-
-<html>
-
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<title>Hadoop <%=namenodeRole%> <%=namenodeLabel%></title>
-  
-<body>
-<h1><%=namenodeRole%> '<%=namenodeLabel%>'</h1>
-<%= JspHelper.getVersionTable(fsn) %>
-<br />
-<b><a href="/nn_browsedfscontent.jsp">Browse the filesystem</a></b><br>
-<b><a href="/logs/"><%=namenodeRole%> Logs</a></b><br>
-<b><a href=/dfshealth.jsp> Go back to DFS home</a></b>
-<hr>
-<%
-	generateDFSNodesList(out, nn, request); 
-%>
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 35
src/webapps/hdfs/index.html

@@ -1,35 +0,0 @@
-<meta HTTP-EQUIV="REFRESH" content="0;url=dfshealth.jsp"/>
-<html>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<head>
-<title>Hadoop Administration</title>
-</head>
-
-<body>
-
-<h1>Hadoop Administration</h1>
-
-<ul>
-
-<li><a href="dfshealth.jsp">DFS Health/Status</a></li>
-
-</ul>
-
-</body>
-
-</html>

+ 0 - 77
src/webapps/hdfs/nn_browsedfscontent.jsp

@@ -1,77 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.hdfs.*"
-  import="org.apache.hadoop.hdfs.server.namenode.*"
-  import="org.apache.hadoop.hdfs.server.datanode.*"
-  import="org.apache.hadoop.hdfs.protocol.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.DateFormat"
-  import="java.net.InetAddress"
-  import="java.net.URLEncoder"
-%>
-<%!
-  public void redirectToRandomDataNode(
-                            NameNode nn, 
-                            HttpServletResponse resp) throws IOException {
-    FSNamesystem fsn = nn.getNamesystem();
-    String datanode = fsn.randomDataNode();
-    String redirectLocation;
-    String nodeToRedirect;
-    int redirectPort;
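-    // Prefer a randomly chosen datanode; fall back to the namenode's own HTTP address when none is available.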
-    if (datanode != null) {
-      redirectPort = Integer.parseInt(datanode.substring(datanode.indexOf(':') + 1));
-      nodeToRedirect = datanode.substring(0, datanode.indexOf(':'));
-    }
-    else {
-      nodeToRedirect = nn.getHttpAddress().getHostName();
-      redirectPort = nn.getHttpAddress().getPort();
-    }
-    String fqdn = InetAddress.getByName(nodeToRedirect).getCanonicalHostName();
-    redirectLocation = "http://" + fqdn + ":" + redirectPort + 
-                       "/browseDirectory.jsp?namenodeInfoPort=" + 
-                       nn.getHttpAddress().getPort() +
-                       "&dir=" + URLEncoder.encode("/", "UTF-8");
-    resp.sendRedirect(redirectLocation);
-  }
-%>
-
-<html>
-
-<title></title>
-
-<body>
-<% 
-  NameNode nn = (NameNode)application.getAttribute("name.node");
-  redirectToRandomDataNode(nn, response); 
-%>
-<hr>
-
-<h2>Local logs</h2>
-<a href="/logs/">Log</a> directory
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 269
src/webapps/job/analysejobhistory.jsp

@@ -1,269 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.SimpleDateFormat"
-  import="org.apache.hadoop.mapred.JobHistory.*"
-%>
-<jsp:include page="loadhistory.jsp">
-  <jsp:param name="jobid" value="<%=request.getParameter("jobid") %>"/>
-  <jsp:param name="logFile" value="<%=request.getParameter("logFile") %>"/>
-</jsp:include>
-<%!	private static SimpleDateFormat dateFormat
-                              = new SimpleDateFormat("d/MM HH:mm:ss");
-%>
-<html><body>
-<%
-  String jobid = request.getParameter("jobid");
-  String logFile = request.getParameter("logFile");
-  String encodedLogFileName = JobHistory.JobInfo.encodeJobHistoryFilePath(logFile);
-  String numTasks = request.getParameter("numTasks");
-  int showTasks = 10 ; 
-  if (numTasks != null) {
-    showTasks = Integer.parseInt(numTasks);  
-  }
-  JobInfo job = (JobInfo)request.getSession().getAttribute("job");
-%>
-<h2>Hadoop Job <a href="jobdetailshistory.jsp?jobid=<%=jobid%>&&logFile=<%=encodedLogFileName%>"><%=jobid %> </a></h2>
-<b>User: </b> <%=job.get(Keys.USER) %><br/>
-<b>JobName: </b> <%=job.get(Keys.JOBNAME) %><br/>
-<b>JobConf: </b> <%=job.get(Keys.JOBCONF) %><br/>
-<b>Submitted At: </b> <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLong(Keys.SUBMIT_TIME), 0 ) %><br/>
-<b>Launched At: </b> <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLong(Keys.LAUNCH_TIME), job.getLong(Keys.SUBMIT_TIME)) %><br/>
-<b>Finished At: </b> <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLong(Keys.FINISH_TIME), job.getLong(Keys.LAUNCH_TIME)) %><br/>
-<b>Status: </b> <%= ((job.get(Keys.JOB_STATUS) == null) ? "Incomplete" : job.get(Keys.JOB_STATUS)) %><br/>
-<hr/>
-<center>
-<%
-  if (!Values.SUCCESS.name().equals(job.get(Keys.JOB_STATUS))) {
-    out.print("<h3>No Analysis available as job did not finish</h3>");
-    return;
-  }
-  Map<String, JobHistory.Task> tasks = job.getAllTasks();
-  int finishedMaps = job.getInt(Keys.FINISHED_MAPS)  ;
-  int finishedReduces = job.getInt(Keys.FINISHED_REDUCES) ;
-  JobHistory.Task [] mapTasks = new JobHistory.Task[finishedMaps]; 
-  JobHistory.Task [] reduceTasks = new JobHistory.Task[finishedReduces]; 
-  int mapIndex = 0 , reduceIndex=0; 
-  long avgMapTime = 0;
-  long avgReduceTime = 0;
-  long avgShuffleTime = 0;
-
-  for (JobHistory.Task task : tasks.values()) {
-    Map<String, TaskAttempt> attempts = task.getTaskAttempts();
-    for (JobHistory.TaskAttempt attempt : attempts.values()) {
-      if (attempt.get(Keys.TASK_STATUS).equals(Values.SUCCESS.name())) {
-        long avgFinishTime = (attempt.getLong(Keys.FINISH_TIME) -
-      		                attempt.getLong(Keys.START_TIME));
-        if (Values.MAP.name().equals(task.get(Keys.TASK_TYPE))) {
-          mapTasks[mapIndex++] = attempt ; 
-          avgMapTime += avgFinishTime;
-        } else if (Values.REDUCE.name().equals(task.get(Keys.TASK_TYPE))) { 
-          reduceTasks[reduceIndex++] = attempt;
-          avgShuffleTime += (attempt.getLong(Keys.SHUFFLE_FINISHED) - 
-                             attempt.getLong(Keys.START_TIME));
-          avgReduceTime += (attempt.getLong(Keys.FINISH_TIME) -
-                            attempt.getLong(Keys.SHUFFLE_FINISHED));
-        }
-        break;
-      }
-    }
-  }
-	 
-  if (finishedMaps > 0) {
-    avgMapTime /= finishedMaps;
-  }
-  if (finishedReduces > 0) {
-    avgReduceTime /= finishedReduces;
-    avgShuffleTime /= finishedReduces;
-  }
-  Comparator<JobHistory.Task> cMap = new Comparator<JobHistory.Task>(){
-    public int compare(JobHistory.Task t1, JobHistory.Task t2){
-      long l1 = t1.getLong(Keys.FINISH_TIME) - t1.getLong(Keys.START_TIME); 
-      long l2 = t2.getLong(Keys.FINISH_TIME) - t2.getLong(Keys.START_TIME);
-      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
-    }
-  }; 
-  Comparator<JobHistory.Task> cShuffle = new Comparator<JobHistory.Task>(){
-    public int compare(JobHistory.Task t1, JobHistory.Task t2){
-      long l1 = t1.getLong(Keys.SHUFFLE_FINISHED) - 
-                t1.getLong(Keys.START_TIME); 
-      long l2 = t2.getLong(Keys.SHUFFLE_FINISHED) - 
-                t2.getLong(Keys.START_TIME); 
-      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
-    }
-  }; 
-  Arrays.sort(mapTasks, cMap);
-  JobHistory.Task minMap = mapTasks[mapTasks.length-1] ;
-%>
-
-<h3>Time taken by best performing Map task 
-<a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>&taskid=<%=minMap.get(Keys.TASKID)%>">
-<%=minMap.get(Keys.TASKID) %></a> : <%=StringUtils.formatTimeDiff(minMap.getLong(Keys.FINISH_TIME), minMap.getLong(Keys.START_TIME) ) %></h3>
-<h3>Average time taken by Map tasks: 
-<%=StringUtils.formatTimeDiff(avgMapTime, 0) %></h3>
-<h3>Worst performing map tasks</h3>
-<table border="2" cellpadding="5" cellspacing="2">
-<tr><td>Task Id</td><td>Time taken</td></tr>
-<%
-  for (int i=0;i<showTasks && i<mapTasks.length; i++) {
-%>
-    <tr>
-    <td><a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>&taskid=<%=mapTasks[i].get(Keys.TASKID)%>">
-        <%=mapTasks[i].get(Keys.TASKID) %></a></td>
-    <td><%=StringUtils.formatTimeDiff(mapTasks[i].getLong(Keys.FINISH_TIME), mapTasks[i].getLong(Keys.START_TIME)) %></td>
-    </tr>
-<%
-  }
-%>
-</table>
-<%  
-  Comparator<JobHistory.Task> cFinishMapRed = 
-    new Comparator<JobHistory.Task>() {
-    public int compare(JobHistory.Task t1, JobHistory.Task t2){
-      long l1 = t1.getLong(Keys.FINISH_TIME); 
-      long l2 = t2.getLong(Keys.FINISH_TIME);
-      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
-    }
-  };
-  Arrays.sort(mapTasks, cFinishMapRed);
-  JobHistory.Task lastMap = mapTasks[0] ;
-%>
-
-<h3>The last Map task 
-<a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>
-&taskid=<%=lastMap.get(Keys.TASKID)%>"><%=lastMap.get(Keys.TASKID) %></a> 
-finished at (relative to the Job launch time): 
-<%=StringUtils.getFormattedTimeWithDiff(dateFormat, 
-                              lastMap.getLong(Keys.FINISH_TIME), 
-                              job.getLong(Keys.LAUNCH_TIME) ) %></h3>
-<hr/>
-
-<%
-  if (reduceTasks.length <= 0) return;
-  Arrays.sort(reduceTasks, cShuffle); 
-  JobHistory.Task minShuffle = reduceTasks[reduceTasks.length-1] ;
-%>
-<h3>Time taken by best performing shuffle
-<a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>
-&taskid=<%=minShuffle.get(Keys.TASKID)%>"><%=minShuffle.get(Keys.TASKID)%></a> : 
-<%=StringUtils.formatTimeDiff(minShuffle.getLong(Keys.SHUFFLE_FINISHED), 
-                              minShuffle.getLong(Keys.START_TIME) ) %></h3>
-<h3>Average time taken by Shuffle: 
-<%=StringUtils.formatTimeDiff(avgShuffleTime, 0) %></h3>
-<h3>Worst performing Shuffle(s)</h3>
-<table border="2" cellpadding="5" cellspacing="2">
-<tr><td>Task Id</td><td>Time taken</td></tr>
-<%
-  for (int i=0;i<showTasks && i<reduceTasks.length; i++) {
-%>
-    <tr>
-    <td><a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=
-<%=encodedLogFileName%>&taskid=<%=reduceTasks[i].get(Keys.TASKID)%>">
-<%=reduceTasks[i].get(Keys.TASKID) %></a></td>
-    <td><%=
-           StringUtils.formatTimeDiff(
-                       reduceTasks[i].getLong(Keys.SHUFFLE_FINISHED),
-                       reduceTasks[i].getLong(Keys.START_TIME)) %>
-    </td>
-    </tr>
-<%
-  }
-%>
-</table>
-<%  
-  Comparator<JobHistory.Task> cFinishShuffle = 
-    new Comparator<JobHistory.Task>() {
-    public int compare(JobHistory.Task t1, JobHistory.Task t2){
-      long l1 = t1.getLong(Keys.SHUFFLE_FINISHED); 
-      long l2 = t2.getLong(Keys.SHUFFLE_FINISHED);
-      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
-    }
-  };
-  Arrays.sort(reduceTasks, cFinishShuffle);
-  JobHistory.Task lastShuffle = reduceTasks[0] ;
-%>
-
-<h3>The last Shuffle  
-<a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>
-&taskid=<%=lastShuffle.get(Keys.TASKID)%>"><%=lastShuffle.get(Keys.TASKID)%>
-</a> finished at (relative to the Job launch time): 
-<%=StringUtils.getFormattedTimeWithDiff(dateFormat,
-                              lastShuffle.getLong(Keys.SHUFFLE_FINISHED), 
-                              job.getLong(Keys.LAUNCH_TIME) ) %></h3>
-
-<%
-  Comparator<JobHistory.Task> cReduce = new Comparator<JobHistory.Task>(){
-    public int compare(JobHistory.Task t1, JobHistory.Task t2){
-      long l1 = t1.getLong(Keys.FINISH_TIME) - 
-                t1.getLong(Keys.SHUFFLE_FINISHED); 
-      long l2 = t2.getLong(Keys.FINISH_TIME) - 
-                t2.getLong(Keys.SHUFFLE_FINISHED);
-      return (l2<l1 ? -1 : (l2==l1 ? 0 : 1));
-    }
-  }; 
-  Arrays.sort(reduceTasks, cReduce); 
-  JobHistory.Task minReduce = reduceTasks[reduceTasks.length-1] ;
-%>
-<hr/>
-<h3>Time taken by best performing Reduce task : 
-<a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>&taskid=<%=minReduce.get(Keys.TASKID)%>">
-<%=minReduce.get(Keys.TASKID) %></a> : 
-<%=StringUtils.formatTimeDiff(minReduce.getLong(Keys.FINISH_TIME),
-    minReduce.getLong(Keys.SHUFFLE_FINISHED) ) %></h3>
-
-<h3>Average time taken by Reduce tasks: 
-<%=StringUtils.formatTimeDiff(avgReduceTime, 0) %></h3>
-<h3>Worst performing reduce tasks</h3>
-<table border="2" cellpadding="5" cellspacing="2">
-<tr><td>Task Id</td><td>Time taken</td></tr>
-<%
-  for (int i=0;i<showTasks && i<reduceTasks.length; i++) {
-%>
-    <tr>
-    <td><a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>&taskid=<%=reduceTasks[i].get(Keys.TASKID)%>">
-        <%=reduceTasks[i].get(Keys.TASKID) %></a></td>
-    <td><%=StringUtils.formatTimeDiff(
-             reduceTasks[i].getLong(Keys.FINISH_TIME), 
-             reduceTasks[i].getLong(Keys.SHUFFLE_FINISHED)) %></td>
-    </tr>
-<%
-  }
-%>
-</table>
-<%  
-  Arrays.sort(reduceTasks, cFinishMapRed);
-  JobHistory.Task lastReduce = reduceTasks[0] ;
-%>
-
-<h3>The last Reduce task 
-<a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>
-&taskid=<%=lastReduce.get(Keys.TASKID)%>"><%=lastReduce.get(Keys.TASKID)%>
-</a> finished at (relative to the Job launch time): 
-<%=StringUtils.getFormattedTimeWithDiff(dateFormat,
-                              lastReduce.getLong(Keys.FINISH_TIME), 
-                              job.getLong(Keys.LAUNCH_TIME) ) %></h3>
-</center>
-</body></html>

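The deleted page above ranks finished tasks with hand-rolled comparators over `FINISH_TIME - START_TIME`, sorted descending, so the slowest attempt sits at index 0 and the "best performing" one at the end of the array. The same ranking in a standalone sketch (Java 16+ for the record syntax; `TaskTiming` is an illustrative stand-in for `JobHistory.Task`, not Hadoop API):

```java
import java.util.Arrays;
import java.util.Comparator;

// Sketch of the sort-by-duration logic in the deleted analysejobhistory.jsp.
public class TaskSortSketch {
  record TaskTiming(String id, long start, long finish) {
    long elapsed() { return finish - start; }
  }

  public static void main(String[] args) {
    TaskTiming[] maps = {
        new TaskTiming("task_0001_m_000000", 0, 90),
        new TaskTiming("task_0001_m_000001", 0, 40),
        new TaskTiming("task_0001_m_000002", 0, 70),
    };
    // Longest elapsed time first, mirroring the page's (l2 < l1 ? -1 : ...) comparator.
    Arrays.sort(maps, Comparator.comparingLong(TaskTiming::elapsed).reversed());
    System.out.println("worst: " + maps[0].id());               // head = slowest
    System.out.println("best:  " + maps[maps.length - 1].id()); // tail = fastest
  }
}
```
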
+ 0 - 35
src/webapps/job/index.html

@@ -1,35 +0,0 @@
-<meta HTTP-EQUIV="REFRESH" content="0;url=jobtracker.jsp"/>
-<html>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<head>
-<title>Hadoop Administration</title>
-</head>
-
-<body>
-
-<h1>Hadoop Administration</h1>
-
-<ul>
-
-<li><a href="jobtracker.jsp">JobTracker</a></li>
-
-</ul>
-
-</body>
-
-</html>

+ 0 - 80
src/webapps/job/jobblacklistedtrackers.jsp

@@ -1,80 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-%>
-<%!       
-  private void printBlackListedTrackers(JspWriter out, 
-                             JobInProgress job) throws IOException {
-    Map<String, Integer> trackerErrors = job.getTaskTrackerErrors();
-    out.print("<table border=2 cellpadding=\"5\" cellspacing=\"2\">");
-    out.print("<tr><th>TaskTracker</th><th>No. of Failures</th></tr>\n");
-    int maxErrorsPerTracker = job.getJobConf().getMaxTaskFailuresPerTracker();
-    for (Map.Entry<String,Integer> e : trackerErrors.entrySet()) {
-      if (e.getValue().intValue() >= maxErrorsPerTracker) {
-        out.print("<tr><td>" + e.getKey() + "</td><td>" + e.getValue() + 
-            "</td></tr>\n");
-      }
-    }
-    out.print("</table>\n");
-  }
-%>
-
-<%
-    String jobId = request.getParameter("jobid");
-    if (jobId == null) {
-      out.println("<h2>Missing 'jobid' for fetching black-listed tasktrackers!</h2>");
-      return;
-    }
-    
-    JobInProgress job = (JobInProgress) tracker.getJob(JobID.forName(jobId));
-    if (job == null) {
-      out.print("<b>Job " + jobId + " not found.</b><br>\n");
-      return;
-    }
-%>
-
-<html>
-<title>Hadoop <%=jobId%>'s black-listed tasktrackers</title>
-<body>
-<h1>Hadoop <a href="jobdetails.jsp?jobid=<%=jobId%>"><%=jobId%></a> - 
-Black-listed task-trackers</h1>
-
-<% 
-    printBlackListedTrackers(out, job); 
-%>
-
-<hr>
-<a href="jobdetails.jsp?jobid=<%=jobId%>">Go back to <%=jobId%></a><br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

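The page above lists a TaskTracker only once its failure count for this job reaches the per-job threshold from `JobConf.getMaxTaskFailuresPerTracker()`. A minimal sketch of that filter over plain collections (class and method names are illustrative):

```java
import java.util.Map;

// Sketch of the blacklist filter deleted above: a tracker is reported once
// its failure count reaches the per-job threshold.
public class BlacklistSketch {
  static void printBlacklisted(Map<String, Integer> trackerErrors, int maxPerTracker) {
    for (Map.Entry<String, Integer> e : trackerErrors.entrySet()) {
      if (e.getValue() >= maxPerTracker) {
        System.out.println(e.getKey() + "\t" + e.getValue());
      }
    }
  }

  public static void main(String[] args) {
    printBlacklisted(Map.of("tt1", 5, "tt2", 1), 4); // only tt1 prints
  }
}
```
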
+ 0 - 71
src/webapps/job/jobconf.jsp

@@ -1,71 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.net.URL"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-
-
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String jobId = request.getParameter("jobid");
-  if (jobId == null) {
-    out.println("<h2>Missing 'jobid' for fetching job configuration!</h2>");
- 	return;
-  }
-%>
-  
-<html>
-
-<title>Job Configuration: JobId - <%= jobId %></title>
-
-<body>
-<h2>Job Configuration: JobId - <%= jobId %></h2><br>
-
-<%
-  String jobFilePath = JobTracker.getLocalJobFilePath(JobID.forName(jobId));
-  FileInputStream jobFile = null;
-  try {
-    jobFile = new FileInputStream(jobFilePath);
-    JobConf jobConf = new JobConf(jobFilePath);
-    XMLUtils.transform(
-        jobConf.getConfResourceAsInputStream("webapps/static/jobconf.xsl"),
-        jobFile, out);
-  } catch (Exception e) {
-    out.println("Failed to retreive job configuration for job '" + jobId + "!");
-    out.println(e);
-  } finally {
-    if (jobFile != null) {
-      try { 
-        jobFile.close(); 
-      } catch (IOException e) {}
-    }
-  }
-%>
-
-<br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

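Both this page and `jobconf_history.jsp` below render the job's XML configuration by piping it through `webapps/static/jobconf.xsl`. Assuming `XMLUtils.transform` wraps standard JAXP (inferred from the call shape, not from the Hadoop source), the rendering step amounts to the following; the inline stylesheet and document here are toy stand-ins:

```java
import java.io.StringReader;
import javax.xml.transform.OutputKeys;
import javax.xml.transform.Transformer;
import javax.xml.transform.TransformerFactory;
import javax.xml.transform.stream.StreamResult;
import javax.xml.transform.stream.StreamSource;

// Hedged sketch of styling a job config with an XSL stylesheet via JAXP.
public class ConfTransformSketch {
  public static void main(String[] args) throws Exception {
    String xsl = "<xsl:stylesheet version='1.0'"
        + " xmlns:xsl='http://www.w3.org/1999/XSL/Transform'>"
        + "<xsl:template match='/'>name=<xsl:value-of select='conf/name'/>"
        + "</xsl:template></xsl:stylesheet>";            // toy stand-in for jobconf.xsl
    String xml = "<conf><name>word count</name></conf>"; // toy job config
    Transformer t = TransformerFactory.newInstance()
        .newTransformer(new StreamSource(new StringReader(xsl)));
    t.setOutputProperty(OutputKeys.OMIT_XML_DECLARATION, "yes");
    t.transform(new StreamSource(new StringReader(xml)),
        new StreamResult(System.out));                   // prints: name=word count
  }
}
```
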
+ 0 - 75
src/webapps/job/jobconf_history.jsp

@@ -1,75 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.net.URL"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.util.*"
-%>
-
-
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String jobId = request.getParameter("jobid");
-  if (jobId == null) {
-    out.println("<h2>Missing 'jobid' for fetching job configuration!</h2>");
- 	return;
-  }
-%>
-  
-<html>
-
-<title>Job Configuration: JobId - <%= jobId %></title>
-
-<body>
-<h2>Job Configuration: JobId - <%= jobId %></h2><br>
-
-<%
-  Path logDir = new Path(request.getParameter("jobLogDir"));
-  Path jobFilePath = new Path(logDir, 
-                       request.getParameter("jobUniqueString") + "_conf.xml");
-  FileSystem fs = (FileSystem)request.getSession().getAttribute("fs");
-  FSDataInputStream jobFile = null; 
-  try {
-    jobFile = fs.open(jobFilePath);
-    JobConf jobConf = new JobConf(jobFilePath);
-    XMLUtils.transform(
-        jobConf.getConfResourceAsInputStream("webapps/static/jobconf.xsl"),
-        jobFile, out);
-  } catch (Exception e) {
-    out.println("Failed to retreive job configuration for job '" + jobId + "!");
-    out.println(e);
-  } finally {
-    if (jobFile != null) {
-      try { 
-        jobFile.close(); 
-      } catch (IOException e) {}
-    }
-  } 
-%>
-
-<br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

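Both jobconf pages guard the input stream with a close-in-finally block that swallows the `IOException`. On Java 7+ the same guarantee is a single try-with-resources statement; a sketch (purely illustrative, these JSPs predate the syntax):

```java
import java.io.FileInputStream;
import java.io.InputStream;

// The close-in-finally pattern above, rewritten with try-with-resources.
public class CloseSketch {
  static long countBytes(String path) throws Exception {
    try (InputStream in = new FileInputStream(path)) { // closed automatically, even on error
      long n = 0;
      while (in.read() != -1) {
        n++;
      }
      return n;
    }
  }

  public static void main(String[] args) throws Exception {
    System.out.println(countBytes(args[0]) + " bytes");
  }
}
```
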
+ 0 - 400
src/webapps/job/jobdetails.jsp

@@ -1,400 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.text.*"
-  import="java.util.*"
-  import="java.text.DecimalFormat"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-%>
-<%!
-  private static final String PRIVATE_ACTIONS_KEY 
-		= "webinterface.private.actions";
- 
-  private void printTaskSummary(JspWriter out,
-                                String jobId,
-                                String kind,
-                                double completePercent,
-                                TaskInProgress[] tasks
-                               ) throws IOException {
-    int totalTasks = tasks.length;
-    int runningTasks = 0;
-    int finishedTasks = 0;
-    int killedTasks = 0;
-    int failedTaskAttempts = 0;
-    int killedTaskAttempts = 0;
-    for(int i=0; i < totalTasks; ++i) {
-      TaskInProgress task = tasks[i];
-      if (task.isComplete()) {
-        finishedTasks += 1;
-      } else if (task.isRunning()) {
-        runningTasks += 1;
-      } else if (task.wasKilled()) {
-        killedTasks += 1;
-      }
-      failedTaskAttempts += task.numTaskFailures();
-      killedTaskAttempts += task.numKilledTasks();
-    }
-    int pendingTasks = totalTasks - runningTasks - killedTasks - finishedTasks; 
-    out.print("<tr><th><a href=\"jobtasks.jsp?jobid=" + jobId + 
-              "&type="+ kind + "&pagenum=1\">" + kind + 
-              "</a></th><td align=\"right\">" + 
-              StringUtils.formatPercent(completePercent, 2) +
-              ServletUtil.percentageGraph((int)(completePercent * 100), 80) +
-              "</td><td align=\"right\">" + 
-              totalTasks + 
-              "</td><td align=\"right\">" + 
-              ((pendingTasks > 0) 
-               ? "<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + 
-                 "&pagenum=1" + "&state=pending\">" + pendingTasks + "</a>"
-               : "0") + 
-              "</td><td align=\"right\">" + 
-              ((runningTasks > 0) 
-               ? "<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + 
-                 "&pagenum=1" + "&state=running\">" + runningTasks + "</a>" 
-               : "0") + 
-              "</td><td align=\"right\">" + 
-              ((finishedTasks > 0) 
-               ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + 
-                "&pagenum=1" + "&state=completed\">" + finishedTasks + "</a>" 
-               : "0") + 
-              "</td><td align=\"right\">" + 
-              ((killedTasks > 0) 
-               ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind +
-                "&pagenum=1" + "&state=killed\">" + killedTasks + "</a>"
-               : "0") + 
-              "</td><td align=\"right\">" + 
-              ((failedTaskAttempts > 0) ? 
-                  ("<a href=\"jobfailures.jsp?jobid=" + jobId + 
-                   "&kind=" + kind + "&cause=failed\">" + failedTaskAttempts + 
-                   "</a>") : 
-                  "0"
-                  ) + 
-              " / " +
-              ((killedTaskAttempts > 0) ? 
-                  ("<a href=\"jobfailures.jsp?jobid=" + jobId + 
-                   "&kind=" + kind + "&cause=killed\">" + killedTaskAttempts + 
-                   "</a>") : 
-                  "0"
-                  ) + 
-              "</td></tr>\n");
-  }
-
-  private void printJobLevelTaskSummary(JspWriter out,
-                                String jobId,
-                                String kind,
-                                TaskInProgress[] tasks
-                               ) throws IOException {
-    int totalTasks = tasks.length;
-    int runningTasks = 0;
-    int finishedTasks = 0;
-    int killedTasks = 0;
-    for(int i=0; i < totalTasks; ++i) {
-      TaskInProgress task = tasks[i];
-      if (task.isComplete()) {
-        finishedTasks += 1;
-      } else if (task.isRunning()) {
-        runningTasks += 1;
-      } else if (task.isFailed()) {
-        killedTasks += 1;
-      }
-    }
-    int pendingTasks = totalTasks - runningTasks - killedTasks - finishedTasks; 
-    out.print(((runningTasks > 0)  
-               ? "<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + 
-                 "&pagenum=1" + "&state=running\">" + " Running" + 
-                 "</a>" 
-               : ((pendingTasks > 0) ? " Pending" :
-                 ((finishedTasks > 0) 
-               ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind + 
-                "&pagenum=1" + "&state=completed\">" + " Successful"
-                 + "</a>" 
-               : ((killedTasks > 0) 
-               ?"<a href=\"jobtasks.jsp?jobid=" + jobId + "&type="+ kind +
-                "&pagenum=1" + "&state=killed\">" + " Failed" 
-                + "</a>" : "None")))));
-  }
-  
-  private void printConfirm(JspWriter out, String jobId) throws IOException{
-    String url = "jobdetails.jsp?jobid=" + jobId;
-    out.print("<html><head><META http-equiv=\"refresh\" content=\"15;URL="
-        + url+"\"></head>"
-        + "<body><h3> Are you sure you want to kill " + jobId
-        + " ?<h3><br><table border=\"0\"><tr><td width=\"100\">"
-        + "<form action=\"" + url + "\" method=\"post\">"
-        + "<input type=\"hidden\" name=\"action\" value=\"kill\" />"
-        + "<input type=\"submit\" name=\"kill\" value=\"Kill\" />"
-        + "</form>"
-        + "</td><td width=\"100\"><form method=\"post\" action=\"" + url
-        + "\"><input type=\"submit\" value=\"Cancel\" name=\"Cancel\""
-        + "/></form></td></tr></table></body></html>");
-  }
-  
-%>       
-<%   
-    String jobId = request.getParameter("jobid"); 
-    String refreshParam = request.getParameter("refresh");
-    if (jobId == null) {
-      out.println("<h2>Missing 'jobid'!</h2>");
-      return;
-    }
-    
-    int refresh = 60; // refresh every 60 seconds by default
-    if (refreshParam != null) {
-        try {
-            refresh = Integer.parseInt(refreshParam);
-        }
-        catch (NumberFormatException ignored) {
-        }
-    }
-    JobID jobIdObj = JobID.forName(jobId);
-    JobInProgress job = (JobInProgress) tracker.getJob(jobIdObj);
-    
-    String action = request.getParameter("action");
-    if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false) && 
-        "changeprio".equalsIgnoreCase(action) 
-        && request.getMethod().equalsIgnoreCase("POST")) {
-      tracker.setJobPriority(jobIdObj, 
-                             JobPriority.valueOf(request.getParameter("prio")));
-    }
-    
-    if (JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false)) {
-      action = request.getParameter("action");
-      if (action != null && action.equalsIgnoreCase("confirm")) {
-        printConfirm(out, jobId);
-        return;
-      } else if (action != null && action.equalsIgnoreCase("kill") &&
-          request.getMethod().equalsIgnoreCase("POST")) {
-        tracker.killJob(jobIdObj);
-      }
-    }
-%>
-
-<%@page import="org.apache.hadoop.mapred.TaskGraphServlet"%>
-<html>
-<head>
-  <% 
-  if (refresh != 0) {
-      %>
-      <meta http-equiv="refresh" content="<%=refresh%>">
-      <%
-  }
-  %>
-<title>Hadoop <%=jobId%> on <%=trackerName%></title>
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-</head>
-<body>
-<h1>Hadoop <%=jobId%> on <a href="jobtracker.jsp"><%=trackerName%></a></h1>
-
-<% 
-    if (job == null) {
-      out.print("<b>Job " + jobId + " not found.</b><br>\n");
-      return;
-    }
-    JobProfile profile = job.getProfile();
-    JobStatus status = job.getStatus();
-    int runState = status.getRunState();
-    int flakyTaskTrackers = job.getNoOfBlackListedTrackers();
-    out.print("<b>User:</b> " + profile.getUser() + "<br>\n");
-    out.print("<b>Job Name:</b> " + profile.getJobName() + "<br>\n");
-    out.print("<b>Job File:</b> <a href=\"jobconf.jsp?jobid=" + jobId + "\">" 
-              + profile.getJobFile() + "</a><br>\n");
-    out.print("<b>Job Setup:</b>");
-    printJobLevelTaskSummary(out, jobId, "setup", job.getSetupTasks());
-    out.print("<br>\n");
-    if (runState == JobStatus.RUNNING) {
-      out.print("<b>Status:</b> Running<br>\n");
-      out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n");
-      out.print("<b>Running for:</b> " + StringUtils.formatTimeDiff(
-          System.currentTimeMillis(), job.getStartTime()) + "<br>\n");
-    } else {
-      if (runState == JobStatus.SUCCEEDED) {
-        out.print("<b>Status:</b> Succeeded<br>\n");
-        out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n");
-        out.print("<b>Finished at:</b> " + new Date(job.getFinishTime()) +
-                  "<br>\n");
-        out.print("<b>Finished in:</b> " + StringUtils.formatTimeDiff(
-            job.getFinishTime(), job.getStartTime()) + "<br>\n");
-      } else if (runState == JobStatus.FAILED) {
-        out.print("<b>Status:</b> Failed<br>\n");
-        out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n");
-        out.print("<b>Failed at:</b> " + new Date(job.getFinishTime()) +
-                  "<br>\n");
-        out.print("<b>Failed in:</b> " + StringUtils.formatTimeDiff(
-            job.getFinishTime(), job.getStartTime()) + "<br>\n");
-      } else if (runState == JobStatus.KILLED) {
-        out.print("<b>Status:</b> Killed<br>\n");
-        out.print("<b>Started at:</b> " + new Date(job.getStartTime()) + "<br>\n");
-        out.print("<b>Killed at:</b> " + new Date(job.getFinishTime()) +
-                  "<br>\n");
-        out.print("<b>Killed in:</b> " + StringUtils.formatTimeDiff(
-            job.getFinishTime(), job.getStartTime()) + "<br>\n");
-      }
-    }
-    out.print("<b>Job Cleanup:</b>");
-    printJobLevelTaskSummary(out, jobId, "cleanup", job.getCleanupTasks());
-    out.print("<br>\n");
-    if (flakyTaskTrackers > 0) {
-      out.print("<b>Black-listed TaskTrackers:</b> " + 
-          "<a href=\"jobblacklistedtrackers.jsp?jobid=" + jobId + "\">" +
-          flakyTaskTrackers + "</a><br>\n");
-    }
-    out.print("<hr>\n");
-    out.print("<table border=2 cellpadding=\"5\" cellspacing=\"2\">");
-    out.print("<tr><th>Kind</th><th>% Complete</th><th>Num Tasks</th>" +
-              "<th>Pending</th><th>Running</th><th>Complete</th>" +
-              "<th>Killed</th>" +
-              "<th><a href=\"jobfailures.jsp?jobid=" + jobId + 
-              "\">Failed/Killed<br>Task Attempts</a></th></tr>\n");
-    printTaskSummary(out, jobId, "map", status.mapProgress(), 
-                     job.getMapTasks());
-    printTaskSummary(out, jobId, "reduce", status.reduceProgress(),
-                     job.getReduceTasks());
-    out.print("</table>\n");
-    
-    %>
-    <p/>
-    <table border=2 cellpadding="5" cellspacing="2">
-    <tr>
-      <th><br/></th>
-      <th>Counter</th>
-      <th>Map</th>
-      <th>Reduce</th>
-      <th>Total</th>
-    </tr>
-    <%
-    Counters mapCounters = job.getMapCounters();
-    Counters reduceCounters = job.getReduceCounters();
-    Counters totalCounters = job.getCounters();
-    
-    for (String groupName : totalCounters.getGroupNames()) {
-      Counters.Group totalGroup = totalCounters.getGroup(groupName);
-      Counters.Group mapGroup = mapCounters.getGroup(groupName);
-      Counters.Group reduceGroup = reduceCounters.getGroup(groupName);
-      
-      Format decimal = new DecimalFormat();
-      
-      boolean isFirst = true;
-      for (Counters.Counter counter : totalGroup) {
-        String name = counter.getDisplayName();
-        String mapValue = decimal.format(mapGroup.getCounter(name));
-        String reduceValue = decimal.format(reduceGroup.getCounter(name));
-        String totalValue = decimal.format(counter.getCounter());
-        %>
-        <tr>
-          <%
-          if (isFirst) {
-            isFirst = false;
-            %>
-            <td rowspan="<%=totalGroup.size()%>"><%=totalGroup.getDisplayName()%></td>
-            <%
-          }
-          %>
-          <td><%=name%></td>
-          <td align="right"><%=mapValue%></td>
-          <td align="right"><%=reduceValue%></td>
-          <td align="right"><%=totalValue%></td>
-        </tr>
-        <%
-      }
-    }
-    %>
-    </table>
-
-<hr>Map Completion Graph - 
-<%
-if("off".equals(request.getParameter("map.graph"))) {
-  session.setAttribute("map.graph", "off");
-} else if("on".equals(request.getParameter("map.graph"))){
-  session.setAttribute("map.graph", "on");
-}
-if("off".equals(request.getParameter("reduce.graph"))) {
-  session.setAttribute("reduce.graph", "off");
-} else if("on".equals(request.getParameter("reduce.graph"))){
-  session.setAttribute("reduce.graph", "on");
-}
-
-if("off".equals(session.getAttribute("map.graph"))) { %>
-<a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&map.graph=on" > open </a>
-<%} else { %> 
-<a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&map.graph=off" > close </a>
-<br><embed src="/taskgraph?type=map&jobid=<%=jobId%>" 
-       width="<%=TaskGraphServlet.width + 2 * TaskGraphServlet.xmargin%>" 
-       height="<%=TaskGraphServlet.height + 3 * TaskGraphServlet.ymargin%>"
-       style="width:100%" type="image/svg+xml" pluginspage="http://www.adobe.com/svg/viewer/install/" />
-<%}%>
-
-<%if(job.getReduceTasks().length > 0) { %>
-<hr>Reduce Completion Graph -
-<%if("off".equals(session.getAttribute("reduce.graph"))) { %>
-<a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&reduce.graph=on" > open </a>
-<%} else { %> 
-<a href="/jobdetails.jsp?jobid=<%=jobId%>&refresh=<%=refresh%>&reduce.graph=off" > close </a>
- 
- <br><embed src="/taskgraph?type=reduce&jobid=<%=jobId%>" 
-       width="<%=TaskGraphServlet.width + 2 * TaskGraphServlet.xmargin%>" 
-       height="<%=TaskGraphServlet.height + 3 * TaskGraphServlet.ymargin%>" 
-       style="width:100%" type="image/svg+xml" pluginspage="http://www.adobe.com/svg/viewer/install/" />
-<%} }%>
-
-<hr>
-<% if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false)) { %>
-  <table border="0"> <tr> <td>
-  Change priority from <%=job.getPriority()%> to:
-  <form action="jobdetails.jsp" method="post">
-  <input type="hidden" name="action" value="changeprio"/>
-  <input type="hidden" name="jobid" value="<%=jobId%>"/>
-  </td><td> <select name="prio"> 
-  <%
-    JobPriority jobPrio = job.getPriority();
-    for (JobPriority prio : JobPriority.values()) {
-      if(jobPrio != prio) {
-        %> <option value=<%=prio%>><%=prio%></option> <%
-      }
-    }
-  %>
-  </select> </td><td><input type="submit" value="Submit"> </form></td></tr> </table>
-<% } %>
-
-<table border="0"> <tr>
-    
-<% if(JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY, false) 
-    	&& runState == JobStatus.RUNNING) { %>
-	<br/><a href="jobdetails.jsp?action=confirm&jobid=<%=jobId%>"> Kill this job </a>
-<% } %>
-
-<hr>
-
-<hr>
-<a href="jobtracker.jsp">Go back to JobTracker</a><br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

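One detail worth noting in the deleted `printTaskSummary` above: pending tasks are never counted directly, only derived by subtraction, so the four buckets always sum to the total. A standalone sketch of that bookkeeping (the `State` enum stands in for the `TaskInProgress` queries):

```java
// Sketch of printTaskSummary's bookkeeping in the deleted jobdetails.jsp.
public class TaskSummarySketch {
  enum State { RUNNING, COMPLETE, KILLED, PENDING }

  public static void main(String[] args) {
    State[] tasks = { State.RUNNING, State.COMPLETE, State.COMPLETE, State.KILLED };
    int running = 0, finished = 0, killed = 0;
    for (State s : tasks) {
      if (s == State.COMPLETE) finished++;
      else if (s == State.RUNNING) running++;
      else if (s == State.KILLED) killed++;
    }
    int pending = tasks.length - running - finished - killed; // derived bucket
    System.out.printf("total=%d pending=%d running=%d complete=%d killed=%d%n",
        tasks.length, pending, running, finished, killed);
  }
}
```
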
+ 0 - 280
src/webapps/job/jobdetailshistory.jsp

@@ -1,280 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.SimpleDateFormat"
-  import="org.apache.hadoop.mapred.JobHistory.*"
-%>
-<jsp:include page="loadhistory.jsp">
-  <jsp:param name="jobid" value="<%=request.getParameter("jobid") %>"/>
-  <jsp:param name="logFile" value="<%=request.getParameter("logFile") %>"/>
-</jsp:include>
-<%! static SimpleDateFormat dateFormat = new SimpleDateFormat("d-MMM-yyyy HH:mm:ss") ; %>
-<%
-    String jobid = request.getParameter("jobid");
-    String logFile = request.getParameter("logFile");
-    String encodedLogFileName = JobHistory.JobInfo.encodeJobHistoryFilePath(logFile);
-
-    Path jobFile = new Path(logFile);
-    String[] jobDetails = jobFile.getName().split("_");
-    String jobUniqueString = jobDetails[0] + "_" + jobDetails[1] + "_" + jobid;
-
-    JobInfo job = (JobInfo)request.getSession().getAttribute("job");
-    FileSystem fs = (FileSystem)request.getSession().getAttribute("fs");
-%>
-<html><body>
-<h2>Hadoop Job <%=jobid %> on <a href="jobhistory.jsp">History Viewer</a></h2>
-
-<b>User: </b> <%=job.get(Keys.USER) %><br/> 
-<b>JobName: </b> <%=job.get(Keys.JOBNAME) %><br/> 
-<b>JobConf: </b> <a href="jobconf_history.jsp?jobid=<%=jobid%>&jobLogDir=<%=new Path(logFile).getParent().toString()%>&jobUniqueString=<%=jobUniqueString%>"> 
-                 <%=job.get(Keys.JOBCONF) %></a><br/> 
-<b>Submitted At: </b> <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLong(Keys.SUBMIT_TIME), 0 )  %><br/> 
-<b>Launched At: </b> <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLong(Keys.LAUNCH_TIME), job.getLong(Keys.SUBMIT_TIME)) %><br/>
-<b>Finished At: </b>  <%=StringUtils.getFormattedTimeWithDiff(dateFormat, job.getLong(Keys.FINISH_TIME), job.getLong(Keys.LAUNCH_TIME)) %><br/>
-<b>Status: </b> <%= ((job.get(Keys.JOB_STATUS) == "")?"Incomplete" :job.get(Keys.JOB_STATUS)) %><br/> 
-<%
-    Map<String, JobHistory.Task> tasks = job.getAllTasks();
-    int totalMaps = 0 ; 
-    int totalReduces = 0;
-    int totalCleanups = 0; 
-    int totalSetups = 0; 
-    int numFailedMaps = 0; 
-    int numKilledMaps = 0;
-    int numFailedReduces = 0 ; 
-    int numKilledReduces = 0;
-    int numFinishedCleanups = 0;
-    int numFailedCleanups = 0;
-    int numKilledCleanups = 0;
-    int numFinishedSetups = 0;
-    int numFailedSetups = 0;
-    int numKilledSetups = 0;
-	
-    long mapStarted = 0 ; 
-    long mapFinished = 0 ; 
-    long reduceStarted = 0 ; 
-    long reduceFinished = 0;
-    long cleanupStarted = 0;
-    long cleanupFinished = 0; 
-    long setupStarted = 0;
-    long setupFinished = 0; 
-        
-    Map <String,String> allHosts = new TreeMap<String,String>();
-    for (JobHistory.Task task : tasks.values()) {
-      Map<String, TaskAttempt> attempts = task.getTaskAttempts();
-      allHosts.put(task.get(Keys.HOSTNAME), "");
-      for (TaskAttempt attempt : attempts.values()) {
-        long startTime = attempt.getLong(Keys.START_TIME) ; 
-        long finishTime = attempt.getLong(Keys.FINISH_TIME) ; 
-        if (Values.MAP.name().equals(task.get(Keys.TASK_TYPE))){
-          if (mapStarted==0 || mapStarted > startTime ) {
-            mapStarted = startTime; 
-          }
-          if (mapFinished < finishTime ) {
-            mapFinished = finishTime ; 
-          }
-          totalMaps++; 
-          if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numFailedMaps++; 
-          } else if (Values.KILLED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numKilledMaps++;
-          }
-        } else if (Values.REDUCE.name().equals(task.get(Keys.TASK_TYPE))) {
-          if (reduceStarted==0||reduceStarted > startTime) {
-            reduceStarted = startTime ; 
-          }
-          if (reduceFinished < finishTime) {
-            reduceFinished = finishTime; 
-          }
-          totalReduces++; 
-          if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numFailedReduces++;
-          } else if (Values.KILLED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numKilledReduces++;
-          }
-        } else if (Values.CLEANUP.name().equals(task.get(Keys.TASK_TYPE))) {
-          if (cleanupStarted==0||cleanupStarted > startTime) {
-            cleanupStarted = startTime ; 
-          }
-          if (cleanupFinished < finishTime) {
-            cleanupFinished = finishTime; 
-          }
-          totalCleanups++; 
-          if (Values.SUCCESS.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numFinishedCleanups++;
-          } else if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numFailedCleanups++;
-          } else if (Values.KILLED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numKilledCleanups++;
-          } 
-        } else if (Values.SETUP.name().equals(task.get(Keys.TASK_TYPE))) {
-          if (setupStarted==0||setupStarted > startTime) {
-            setupStarted = startTime ; 
-          }
-          if (setupFinished < finishTime) {
-            setupFinished = finishTime; 
-          }
-          totalSetups++; 
-          if (Values.SUCCESS.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numFinishedSetups++;
-          } else if (Values.FAILED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numFailedSetups++;
-          } else if (Values.KILLED.name().equals(attempt.get(Keys.TASK_STATUS))) {
-            numKilledSetups++;
-          }
-        }
-      }
-    }
-%>
-<b><a href="analysejobhistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>">Analyse This Job</a></b> 
-<hr/>
-<center>
-<table border="2" cellpadding="5" cellspacing="2">
-<tr>
-<td>Kind</td><td>Total Tasks (successful+failed+killed)</td><td>Successful tasks</td><td>Failed tasks</td><td>Killed tasks</td><td>Start Time</td><td>Finish Time</td>
-</tr>
-<tr>
-<td>Setup</td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.SETUP.name() %>&status=all">
-        <%=totalSetups%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.SETUP.name() %>&status=<%=Values.SUCCESS %>">
-        <%=numFinishedSetups%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.SETUP.name() %>&status=<%=Values.FAILED %>">
-        <%=numFailedSetups%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.SETUP.name() %>&status=<%=Values.KILLED %>">
-        <%=numKilledSetups%></a></td>  
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, setupStarted, 0) %></td>
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, setupFinished, setupStarted) %></td>
-</tr>
-<tr>
-<td>Map</td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.MAP.name() %>&status=all">
-        <%=totalMaps %></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.MAP.name() %>&status=<%=Values.SUCCESS %>">
-        <%=job.getInt(Keys.FINISHED_MAPS) %></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.MAP.name() %>&status=<%=Values.FAILED %>">
-        <%=numFailedMaps %></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.MAP.name() %>&status=<%=Values.KILLED %>">
-        <%=numKilledMaps %></a></td>
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, mapStarted, 0) %></td>
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, mapFinished, mapStarted) %></td>
-</tr>
-<tr>
-<td>Reduce</td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.REDUCE.name() %>&status=all">
-        <%=totalReduces%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.REDUCE.name() %>&status=<%=Values.SUCCESS %>">
-        <%=job.getInt(Keys.FINISHED_REDUCES)%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.REDUCE.name() %>&status=<%=Values.FAILED %>">
-        <%=numFailedReduces%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.REDUCE.name() %>&status=<%=Values.KILLED %>">
-        <%=numKilledReduces%></a></td>  
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, reduceStarted, 0) %></td>
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, reduceFinished, reduceStarted) %></td>
-</tr>
-<tr>
-<td>Cleanup</td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.CLEANUP.name() %>&status=all">
-        <%=totalCleanups%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.CLEANUP.name() %>&status=<%=Values.SUCCESS %>">
-        <%=numFinishedCleanups%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.CLEANUP.name() %>&status=<%=Values.FAILED %>">
-        <%=numFailedCleanups%></a></td>
-    <td><a href="jobtaskshistory.jsp?jobid=<%=jobid %>&logFile=<%=encodedLogFileName%>&taskType=<%=Values.CLEANUP.name() %>&status=<%=Values.KILLED %>">
-        <%=numKilledCleanups%></a></td>  
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, cleanupStarted, 0) %></td>
-    <td><%=StringUtils.getFormattedTimeWithDiff(dateFormat, cleanupFinished, cleanupStarted) %></td>
-</tr>
-</table>
-
-<br/>
- <%
-    DefaultJobHistoryParser.FailedOnNodesFilter filter = 
-                 new DefaultJobHistoryParser.FailedOnNodesFilter();
-    JobHistory.parseHistoryFromFS(logFile, filter, fs); 
-    Map<String, Set<String>> badNodes = filter.getValues(); 
-    if (badNodes.size() > 0) {
- %>
-<h3>Failed task attempts by node</h3>
-<table border="1">
-<tr><td>Hostname</td><td>Failed Tasks</td></tr>
- <%	  
-      for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
-        String node = entry.getKey();
-        Set<String> failedTasks = entry.getValue();
-%>
-        <tr>
-        <td><%=node %></td>
-        <td>
-<%
-        for (String t : failedTasks) {
-%>
-          <a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>&taskid=<%=t %>"><%=t %></a>,&nbsp;
-<%		  
-        }
-%>	
-        </td>
-        </tr>
-<%
-      }
-%>
-</table>
-<%
-    }
-%>
-<br/>
- <%
-    DefaultJobHistoryParser.KilledOnNodesFilter killedFilter =
-                 new DefaultJobHistoryParser.KilledOnNodesFilter();
-    JobHistory.parseHistoryFromFS(logFile, killedFilter, fs);
-    badNodes = killedFilter.getValues(); 
-    if (badNodes.size() > 0) {
- %>
-<h3>Killed task attempts by node</h3>
-<table border="1">
-<tr><td>Hostname</td><td>Killed Tasks</td></tr>
- <%	  
-      for (Map.Entry<String, Set<String>> entry : badNodes.entrySet()) {
-        String node = entry.getKey();
-        Set<String> killedTasks = entry.getValue();
-%>
-        <tr>
-        <td><%=node %></td>
-        <td>
-<%
-        for (String t : killedTasks) {
-%>
-          <a href="taskdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>&taskid=<%=t %>"><%=t %></a>,&nbsp;
-<%		  
-        }
-%>	
-        </td>
-        </tr>
-<%
-      }
-%>
-</table>
-<%
-    }
-%>
-</center>
-</body></html>

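The deleted history page derives each phase's window by folding over all attempts: the earliest start wins, the latest finish wins, with `0` doubling as the "unset" sentinel (just as in the JSP's `mapStarted==0 || mapStarted > startTime` checks). A compact sketch:

```java
// Sketch of the per-phase timing aggregation in jobdetailshistory.jsp.
public class PhaseTimingSketch {
  public static void main(String[] args) {
    long[][] attempts = { {100, 250}, {90, 300}, {120, 280} }; // {start, finish} millis
    long started = 0, finished = 0;
    for (long[] a : attempts) {
      if (started == 0 || started > a[0]) started = a[0]; // min start (0 = unset)
      if (finished < a[1]) finished = a[1];               // max finish
    }
    System.out.println("phase ran " + started + " -> " + finished);
  }
}
```

Note the sentinel only works because no real attempt starts at epoch millisecond 0; the JSP relies on the same assumption.
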
+ 0 - 187
src/webapps/job/jobfailures.jsp

@@ -1,187 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-%>
-<%! 
-  private void printFailedAttempts(JspWriter out,
-                                   JobTracker tracker,
-                                   JobID jobId,
-                                   TaskInProgress tip,
-                                   TaskStatus.State failState) throws IOException {
-    TaskStatus[] statuses = tip.getTaskStatuses();
-    TaskID tipId = tip.getTIPId();
-    for(int i=0; i < statuses.length; ++i) {
-      TaskStatus.State taskState = statuses[i].getRunState();
-      if ((failState == null && (taskState == TaskStatus.State.FAILED || 
-          taskState == TaskStatus.State.KILLED)) || taskState == failState) {
-        String taskTrackerName = statuses[i].getTaskTracker();
-        TaskTrackerStatus taskTracker = tracker.getTaskTracker(taskTrackerName);
-        out.print("<tr><td>" + statuses[i].getTaskID() +
-                  "</td><td><a href=\"taskdetails.jsp?jobid="+ jobId + 
-                  "&tipid=" + tipId + "\">" + tipId +
-                  "</a></td>");
-        if (taskTracker == null) {
-          out.print("<td>" + taskTrackerName + "</td>");
-        } else {
-          out.print("<td><a href=\"http://" + taskTracker.getHost() + ":" +
-                    taskTracker.getHttpPort() + "\">" +  taskTracker.getHost() + 
-                    "</a></td>");
-        }
-        out.print("<td>" + taskState + "</td>");
-        out.print("<td><pre>");
-        String[] failures = 
-                     tracker.getTaskDiagnostics(statuses[i].getTaskID());
-        if (failures == null) {
-          out.print("&nbsp;");
-        } else {
-          for(int j = 0 ; j < failures.length ; j++){
-            out.print(failures[j]);
-            if (j < (failures.length - 1)) {
-              out.print("\n-------\n");
-            }
-          }
-        }
-        out.print("</pre></td>");
-        
-        out.print("<td>");
-        String taskLogUrl = null;
-        if (taskTracker != null) {
-          taskLogUrl = TaskLogServlet.getTaskLogUrl(taskTracker.getHost(),
-                                String.valueOf(taskTracker.getHttpPort()),
-                                statuses[i].getTaskID().toString());
-        }
-        if (taskLogUrl != null) {
-          String tailFourKBUrl = taskLogUrl + "&start=-4097";
-          String tailEightKBUrl = taskLogUrl + "&start=-8193";
-          String entireLogUrl = taskLogUrl;
-          out.print("<a href=\"" + tailFourKBUrl + "\">Last 4KB</a><br/>");
-          out.print("<a href=\"" + tailEightKBUrl + "\">Last 8KB</a><br/>");
-          out.print("<a href=\"" + entireLogUrl + "\">All</a><br/>");
-        } else { 
-          out.print("n/a"); // task tracker was lost
-        }
-        out.print("</td>");
-        
-        out.print("</tr>\n");
-       }
-    }
-  }
-             
-  private void printFailures(JspWriter out, 
-                             JobTracker tracker,
-                             JobID jobId,
-                             String kind, 
-                             String cause) throws IOException {
-    JobInProgress job = (JobInProgress) tracker.getJob(jobId);
-    if (job == null) {
-      out.print("<b>Job " + jobId + " not found.</b><br>\n");
-      return;
-    }
-    
-    boolean includeMap = false;
-    boolean includeReduce = false;
-    if (kind == null) {
-      includeMap = true;
-      includeReduce = true;
-    } else if ("map".equals(kind)) {
-      includeMap = true;
-    } else if ("reduce".equals(kind)) {
-      includeReduce = true;
-    } else if ("all".equals(kind)) {
-      includeMap = true;
-      includeReduce = true;
-    } else {
-      out.print("<b>Kind " + kind + " not supported.</b><br>\n");
-      return;
-    }
-    
-    TaskStatus.State state = null;
-    try {
-      if (cause != null) {
-        state = TaskStatus.State.valueOf(cause.toUpperCase());
-        if (state != TaskStatus.State.FAILED && state != TaskStatus.State.KILLED) {
-          out.print("<b>Cause '" + cause + 
-              "' is not an 'unsuccessful' state.</b><br>\n");
-          return;
-        }
-      }
-    } catch (IllegalArgumentException e) {
-      out.print("<b>Cause '" + cause + "' not supported.</b><br>\n");
-      return;
-    }
-    	
-    out.print("<table border=2 cellpadding=\"5\" cellspacing=\"2\">");
-    out.print("<tr><th>Attempt</th><th>Task</th><th>Machine</th><th>State</th>" +
-              "<th>Error</th><th>Logs</th></tr>\n");
-    if (includeMap) {
-      TaskInProgress[] tips = job.getMapTasks();
-      for(int i=0; i < tips.length; ++i) {
-        printFailedAttempts(out, tracker, jobId, tips[i], state);
-      }
-    }
-    if (includeReduce) {
-      TaskInProgress[] tips = job.getReduceTasks();
-      for(int i=0; i < tips.length; ++i) {
-        printFailedAttempts(out, tracker, jobId, tips[i], state);
-      }
-    }
-    out.print("</table>\n");
-  }
-%>
-
-<%
-    String jobId = request.getParameter("jobid");
-    if (jobId == null) {
-      out.println("<h2>Missing 'jobid'!</h2>");
-      return;
-    }
-    JobID jobIdObj = JobID.forName(jobId);
-    String kind = request.getParameter("kind");
-    String cause = request.getParameter("cause");
-%>
-
-<html>
-<title>Hadoop <%=jobId%> failures on <%=trackerName%></title>
-<body>
-<h1>Hadoop <a href="jobdetails.jsp?jobid=<%=jobId%>"><%=jobId%></a>
-failures on <a href="jobtracker.jsp"><%=trackerName%></a></h1>
-
-<% 
-    printFailures(out, tracker, jobIdObj, kind, cause); 
-%>
-
-<hr>
-<a href="jobtracker.jsp">Go back to JobTracker</a><br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

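The deleted `jobfailures.jsp` validates the `?cause=` parameter by parsing it into the task-state enum and accepting only the two "unsuccessful" states. The same validation in a standalone sketch (the `State` enum here is illustrative, not Hadoop's `TaskStatus.State`):

```java
// Sketch of the cause-parameter validation deleted above.
public class CauseFilterSketch {
  enum State { RUNNING, SUCCEEDED, FAILED, KILLED }

  static State parseCause(String cause) {
    if (cause == null) return null;               // null = show both FAILED and KILLED
    State s = State.valueOf(cause.toUpperCase()); // throws on unknown causes
    if (s != State.FAILED && s != State.KILLED) {
      throw new IllegalArgumentException("'" + cause + "' is not an unsuccessful state");
    }
    return s;
  }

  public static void main(String[] args) {
    System.out.println(parseCause("failed")); // FAILED
  }
}
```
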
+ 0 - 324
src/webapps/job/jobhistory.jsp

@@ -1,324 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="org.apache.hadoop.fs.*"
-  import="javax.servlet.jsp.*"
-  import="java.text.SimpleDateFormat"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.mapred.JobHistory.*"
-%>
-<%	
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName =
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-%>
-<%!	
-  private static SimpleDateFormat dateFormat = 
-                                    new SimpleDateFormat("d/MM HH:mm:ss");
-%>
-<html>
-<head>
-<script type="text/JavaScript">
-<!--
-function showUserHistory(search)
-{
-var url;
-if (search == null || search == "") {
-  url="jobhistory.jsp";
-} else {
-  url="jobhistory.jsp?pageno=1&search=" + search;
-}
-window.location.href = url;
-}
-//-->
-</script>
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<title><%= trackerName %> Hadoop Map/Reduce History Viewer</title>
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-</head>
-<body>
-<h1> <a href="jobtracker.jsp"><%= trackerName %></a> Hadoop Map/Reduce 
-     <a href="jobhistory.jsp">History Viewer</a></h1>
-<hr>
-<%
-    final String search = (request.getParameter("search") == null)
-                          ? ""
-                          : request.getParameter("search");
-
-    String parts[] = search.split(":");
-
-    final String user = (parts.length >= 1)
-                        ? parts[0].toLowerCase()
-                        : "";
-    final String jobname = (parts.length >= 2)
-                           ? parts[1].toLowerCase()
-                           : "";
-    PathFilter jobLogFileFilter = new PathFilter() {
-      private boolean matchUser(String fileName) {
-        // return true if 
-        //  - user is not specified
-        //  - user matches
-        return "".equals(user) || user.equals(fileName.split("_")[5]);
-      }
-
-      private boolean matchJobName(String fileName) {
-        // return true if 
-        //  - jobname is not specified
-        //  - jobname contains the keyword
-        return "".equals(jobname) || fileName.split("_")[6].toLowerCase().contains(jobname);
-      }
-
-      public boolean accept(Path path) {
-        return !(path.getName().endsWith(".xml")) && matchUser(path.getName()) && matchJobName(path.getName());
-      }
-    };
-    
-    FileSystem fs = (FileSystem) application.getAttribute("fileSys");
-    String historyLogDir = (String) application.getAttribute("historyLogDir");
-    if (fs == null) {
-      out.println("Null file system. May be namenode is in safemode!");
-      return;
-    }
-    Path[] jobFiles = FileUtil.stat2Paths(fs.listStatus(new Path(historyLogDir),
-                                          jobLogFileFilter));
-    out.println("<!--  user : " + user + ", jobname : " + jobname + "-->");
-    if (null == jobFiles || jobFiles.length == 0)  {
-      out.println("No files found!"); 
-      return ; 
-    }
-
-    // get the pageno
-    int pageno = request.getParameter("pageno") == null
-                ? 1
-                : Integer.parseInt(request.getParameter("pageno"));
-
-    // get the total number of files to display
-    int size = 100;
-
-    // if show-all is requested or jobfiles < size(100)
-    if (pageno == -1 || size > jobFiles.length) {
-      size = jobFiles.length;
-    }
-
-    if (pageno == -1) { // special case 'show all'
-      pageno = 1;
-    }
-
-    int maxPageNo = (int)Math.ceil((float)jobFiles.length / size);
-
-    // check and fix pageno
-    if (pageno < 1 || pageno > maxPageNo) {
-      out.println("Invalid page index");
-      return ;
-    }
-
-    int length = size ; // determine the length of job history files to be displayed
-    if (pageno == maxPageNo) {
-      // find the number of files to be shown on the last page
-      int startOnLast = ((pageno - 1) * size) + 1;
-      length = jobFiles.length - startOnLast + 1;
-    }
-
-    // Display the search box
-    out.println("<form name=search><b> Filter (username:jobname) </b>"); // heading
-    out.println("<input type=text name=search size=\"20\" value=\"" + search + "\">"); // search box
-    out.println("<input type=submit value=\"Filter!\" onClick=\"showUserHistory(document.getElementById('search').value)\"></form>");
-    out.println("<span class=\"small\">Example: 'smith' will display jobs either submitted by user 'smith'. 'smith:sort' will display jobs from user 'smith' having 'sort' keyword in the jobname.</span>"); // example
-    out.println("<hr>");
-
-    //Show the status
-    int start = (pageno - 1) * size + 1;
-
-    // DEBUG
-    out.println("<!-- pageno : " + pageno + ", size : " + size + ", length : " + length + ", start : " + start + ", maxpg : " + maxPageNo + "-->");
-
-    out.println("<font size=5><b>Available Jobs in History </b></font>");
-    // display the number of jobs, start index, end index
-    out.println("(<i> <span class=\"small\">Displaying <b>" + length + "</b> jobs from <b>" + start + "</b> to <b>" + (start + length - 1) + "</b> out of <b>" + jobFiles.length + "</b> jobs");
-    if (!"".equals(user)) {
-      out.println(" for user <b>" + user + "</b>"); // show the user if present
-    }
-    if (!"".equals(jobname)) {
-      out.println(" with jobname having the keyword <b>" + jobname + "</b> in it."); // show the jobname keyword if present
-    }
-    out.print("</span></i>)");
-
-    // show the 'show-all' link
-    out.println(" [<span class=\"small\"><a href=\"jobhistory.jsp?pageno=-1&search=" + search + "\">show all</a></span>]");
-
-    // show the 'first-page' link
-    if (pageno > 1) {
-      out.println(" [<span class=\"small\"><a href=\"jobhistory.jsp?pageno=1&search=" + search + "\">first page</a></span>]");
-    } else {
-      out.println("[<span class=\"small\">first page]</span>");
-    }
-
-    // show the 'last-page' link
-    if (pageno < maxPageNo) {
-      out.println(" [<span class=\"small\"><a href=\"jobhistory.jsp?pageno=" + maxPageNo + "&search=" + search + "\">last page</a></span>]");
-    } else {
-      out.println("<span class=\"small\">[last page]</span>");
-    }
-
-    // sort the files on creation time.
-    Arrays.sort(jobFiles, new Comparator<Path>() {
-      public int compare(Path p1, Path p2) {
-        String dp1 = null;
-        String dp2 = null;
-        
-        try {
-          dp1 = JobHistory.JobInfo.decodeJobHistoryFileName(p1.getName());
-          dp2 = JobHistory.JobInfo.decodeJobHistoryFileName(p2.getName());
-        } catch (IOException ioe) {
-            throw new RuntimeException(ioe);
-        }
-                
-        String[] split1 = dp1.split("_");
-        String[] split2 = dp2.split("_");
-        
-        // compare job tracker start time
-        int res = new Date(Long.parseLong(split1[1])).compareTo(
-                             new Date(Long.parseLong(split2[1])));
-        if (res == 0) {
-          res = new Date(Long.parseLong(split1[3])).compareTo(
-                           new Date(Long.parseLong(split2[3])));
-        }
-        if (res == 0) {
-          Long l1 = Long.parseLong(split1[4]);
-          res = l1.compareTo(Long.parseLong(split2[4]));
-        }
-        return res;
-      }
-    });
-
-    out.println("<br><br>");
-
-    // print the navigation info (top)
-    printNavigation(pageno, size, maxPageNo, search, out);
-
-    out.print("<table align=center border=2 cellpadding=\"5\" cellspacing=\"2\">");
-    out.print("<tr>");
-    out.print("<td>Job tracker Host Name</td>" +
-              "<td>Job tracker Start time</td>" +
-              "<td>Job Id</td><td>Name</td><td>User</td>") ; 
-    out.print("</tr>"); 
-    
-    Set<String> displayedJobs = new HashSet<String>();
-    for (int i = start - 1; i < start + length - 1; ++i) {
-      Path jobFile = jobFiles[i];
-      
-      String decodedJobFileName = 
-          JobHistory.JobInfo.decodeJobHistoryFileName(jobFile.getName());
-
-      String[] jobDetails = decodedJobFileName.split("_");
-      String trackerHostName = jobDetails[0];
-      String trackerStartTime = jobDetails[1];
-      String jobId = jobDetails[2] + "_" +jobDetails[3] + "_" + jobDetails[4] ;
-      String userName = jobDetails[5];
-      String jobName = jobDetails[6];
-      
-      // Check if the job is already displayed. There can be multiple job 
-      // history files for jobs that have restarted
-      if (displayedJobs.contains(jobId)) {
-        continue;
-      } else {
-        displayedJobs.add(jobId);
-      }
-      
-      // Encode the logfile name again to cancel the decoding done by the browser
-      String encodedJobFileName = 
-          JobHistory.JobInfo.encodeJobHistoryFileName(jobFile.getName());
-%>
-<center>
-<%	
-      printJob(trackerHostName, trackerStartTime, jobId,
-               jobName, userName, new Path(jobFile.getParent(), encodedJobFileName), 
-               out) ; 
-%>
-</center> 
-<%
-    } // end of loop over job history files
-    out.print("</table>");
-
-    // show the navigation info (bottom)
-    printNavigation(pageno, size, maxPageNo, search, out);
-%>
-<%!
-    private void printJob(String trackerHostName, String trackerid,
-                          String jobId, String jobName,
-                          String user, Path logFile, JspWriter out)
-    throws IOException {
-      out.print("<tr>"); 
-      out.print("<td>" + trackerHostName + "</td>"); 
-      out.print("<td>" + new Date(Long.parseLong(trackerid)) + "</td>"); 
-      out.print("<td>" + "<a href=\"jobdetailshistory.jsp?jobid=" + jobId + 
-                "&logFile=" + logFile.toString() + "\">" + jobId + "</a></td>"); 
-      out.print("<td>" + jobName + "</td>"); 
-      out.print("<td>" + user + "</td>"); 
-      out.print("</tr>");
-    }
-
-    private void printNavigation(int pageno, int size, int max, String search, 
-                                 JspWriter out) throws IOException {
-      int numIndexToShow = 5; // num indexes to show on either side
-
-      //TODO check this on boundary cases
-      out.print("<center> <");
-
-      // show previous link
-      if (pageno > 1) {
-        out.println("<a href=\"jobhistory.jsp?pageno=" + (pageno - 1) + "&search=" + search + "\">Previous</a>");
-      }
-
-      // display the numbered index 1 2 3 4
-      int firstPage = pageno - numIndexToShow;
-      if (firstPage < 1) {
-        firstPage = 1; // boundary condition
-      }
-
-      int lastPage = pageno + numIndexToShow;
-      if (lastPage > max) {
-        lastPage = max; // boundary condition
-      }
-
-      // debug
-      out.println("<!--DEBUG : firstPage : " + firstPage + ", lastPage : " + lastPage + " -->");
-
-      for (int i = firstPage; i <= lastPage; ++i) {
-        if (i != pageno) {// needs hyperlink
-          out.println(" <a href=\"jobhistory.jsp?pageno=" + i + "&search=" + search + "\">" + i + "</a> ");
-        } else { // current page
-          out.println(i);
-        }
-      }
-
-      // show the next link
-      if (pageno < max) {
-        out.println("<a href=\"jobhistory.jsp?pageno=" + (pageno + 1) + "&search=" + search + "\">Next</a>");
-      }
-      out.print("></center>");
-    }
-%> 
-</body></html>
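
The paging logic deleted above is easiest to follow in isolation. Below is a minimal, self-contained sketch of the same arithmetic, assuming the page's fixed size of 100 and its convention that pageno == -1 means "show all"; the class and method names are invented for illustration and are not part of the Hadoop API.

// Sketch of the pagination arithmetic the removed jobhistory.jsp performed.
// The caller guarantees at least one history file, as the JSP did.
public class HistoryPager {
  static final int PAGE_SIZE = 100;

  // Returns {start (1-based), length} of the window for the requested page.
  static int[] window(int pageno, int totalFiles) {
    int size = PAGE_SIZE;
    if (pageno == -1 || size > totalFiles) {
      size = totalFiles;            // show-all, or fewer files than one page
    }
    if (pageno == -1) {             // 'show all' collapses to page 1
      pageno = 1;
    }
    int maxPageNo = (int) Math.ceil((float) totalFiles / size);
    if (pageno < 1 || pageno > maxPageNo) {
      throw new IllegalArgumentException("Invalid page index");
    }
    int length = size;
    if (pageno == maxPageNo) {      // the last page may be partial
      int startOnLast = (pageno - 1) * size + 1;
      length = totalFiles - startOnLast + 1;
    }
    int start = (pageno - 1) * size + 1;
    return new int[] { start, length };
  }

  public static void main(String[] args) {
    int[] w = window(3, 250);       // expect start=201, length=50
    System.out.println("start=" + w[0] + ", length=" + w[1]);
  }
}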

+ 0 - 89
src/webapps/job/jobqueue_details.jsp

@@ -1,89 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.util.Vector"
-  import="java.util.Collection"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.StringUtils"
-  import="org.apache.hadoop.util.ServletUtil"
-%>
-<%!
-private static final long serialVersionUID = 526456771152222127L; 
-%>
-<%
-  JobTracker tracker = 
-    (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-    StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-  String queueName = 
-    StringUtils.escapeHTML(request.getParameter("queueName"));
-  TaskScheduler scheduler = tracker.getTaskScheduler();
-  Collection<JobInProgress> jobs = scheduler.getJobs(queueName);
-  JobQueueInfo schedInfo = tracker.getQueueInfo(queueName);
-%>
-<html>
-<head>
-<title>Queue details for <%=queueName!=null?queueName:""%> </title>
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<script type="text/javascript" src="/static/jobtracker.js"></script>
-</head>
-<body>
-<% JSPUtil.processButtons(request, response, tracker); %>
-<%
-  String schedulingInfoString = schedInfo.getSchedulingInfo();
-%>
-<h1>Hadoop Job Queue Scheduling Information on 
-  <a href="jobtracker.jsp"><%=trackerName%></a>
-</h1>
-<div>
-Scheduling Information : <%= schedulingInfoString.replaceAll("\n","<br/>") %>
-</div>
-<hr/>
-<%
-if(jobs == null || jobs.isEmpty()) {
-%>
-<center>
-<h2> No Jobs found for the Queue :: <%=queueName!=null?queueName:""%> </h2>
-<hr/>
-</center>
-<%
-}else {
-%>
-<center>
-<h2> Job Summary for the Queue :: <%=queueName!=null?queueName:"" %> </h2>
-</center>
-<div style="text-align: center;text-indent: center;font-style: italic;">
-(In the order maintained by the scheduler)
-</div>
-<br/>
-<hr/>
-<%=
-  JSPUtil.generateJobTable("Job List", jobs, 30, 0)
-%>
-<hr>
-<% } %>
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>
-

+ 0 - 154
src/webapps/job/jobtasks.jsp

@@ -1,154 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.lang.Integer"
-  import="java.text.SimpleDateFormat"
-%>
-<%! static SimpleDateFormat dateFormat = new SimpleDateFormat("d-MMM-yyyy HH:mm:ss") ; %>
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-  String jobid = request.getParameter("jobid");
-  if (jobid == null) {
-    out.println("<h2>Missing 'jobid'!</h2>");
-    return;
-  }
-  String type = request.getParameter("type");
-  String pagenum = request.getParameter("pagenum");
-  String state = request.getParameter("state");
-  state = (state!=null) ? state : "all";
-  int pnum = (pagenum == null) ? 1 : Integer.parseInt(pagenum);
-  int next_page = pnum+1;
-  int numperpage = 2000;
-  JobID jobidObj = JobID.forName(jobid);
-  JobInProgress job = (JobInProgress) tracker.getJob(jobidObj);
-  JobProfile profile = (job != null) ? (job.getProfile()) : null;
-  JobStatus status = (job != null) ? (job.getStatus()) : null;
-  TaskReport[] reports = null;
-  int start_index = (pnum - 1) * numperpage;
-  int end_index = start_index + numperpage;
-  int report_len = 0;
-  if ("map".equals(type)) {
-    reports = (job != null) ? tracker.getMapTaskReports(jobidObj) : null;
-  } else if ("reduce".equals(type)) {
-    reports = (job != null) ? tracker.getReduceTaskReports(jobidObj) : null;
-  } else if ("cleanup".equals(type)) {
-    reports = (job != null) ? tracker.getCleanupTaskReports(jobidObj) : null;
-  } else if ("setup".equals(type)) {
-    reports = (job != null) ? tracker.getSetupTaskReports(jobidObj) : null;
-  }
-%>
-
-<html>
-  <head>
-    <title>Hadoop <%=type%> task list for <%=jobid%> on <%=trackerName%></title>
-    <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-  </head>
-<body>
-<h1>Hadoop <%=type%> task list for 
-<a href="jobdetails.jsp?jobid=<%=jobid%>"><%=jobid%></a> on 
-<a href="jobtracker.jsp"><%=trackerName%></a></h1>
-<%
-  if (job == null) {
-    out.print("<b>Job " + jobid + " not found.</b><br>\n");
-    return;
-  }
-  // Filter the reports if a state filter is specified
-  if (!"all".equals(state)) {
-    List<TaskReport> filteredReports = new ArrayList<TaskReport>();
-    for (int i = 0; i < reports.length; ++i) {
-      if (("completed".equals(state) && reports[i].getCurrentStatus() == TIPStatus.COMPLETE) 
-          || ("running".equals(state) && reports[i].getCurrentStatus() == TIPStatus.RUNNING) 
-          || ("killed".equals(state) && reports[i].getCurrentStatus() == TIPStatus.KILLED) 
-          || ("pending".equals(state)  && reports[i].getCurrentStatus() == TIPStatus.PENDING)) {
-        filteredReports.add(reports[i]);
-      }
-    }
-    // using filtered reports instead of all the reports
-    reports = filteredReports.toArray(new TaskReport[0]);
-    filteredReports = null;
-  }
-  report_len = reports.length;
-  
-  if (report_len <= start_index) {
-    out.print("<b>No such tasks</b>");
-  } else {
-    out.print("<hr>");
-    out.print("<h2>" + Character.toUpperCase(state.charAt(0)) 
-              + state.substring(1).toLowerCase() + " Tasks</h2>");
-    out.print("<center>");
-    out.print("<table border=2 cellpadding=\"5\" cellspacing=\"2\">");
-    out.print("<tr><td align=\"center\">Task</td><td>Complete</td><td>Status</td>" +
-              "<td>Start Time</td><td>Finish Time</td><td>Errors</td><td>Counters</td></tr>");
-    if (end_index > report_len){
-        end_index = report_len;
-    }
-    for (int i = start_index ; i < end_index; i++) {
-          TaskReport report = reports[i];
-          out.print("<tr><td><a href=\"taskdetails.jsp?jobid=" + jobid + 
-                    "&tipid=" + report.getTaskID() + "\">"  + 
-                    report.getTaskID() + "</a></td>");
-         out.print("<td>" + StringUtils.formatPercent(report.getProgress(),2) +
-        		   ServletUtil.percentageGraph(report.getProgress() * 100f, 80) + "</td>");
-         out.print("<td>"  + report.getState() + "<br/></td>");
-         out.println("<td>" + StringUtils.getFormattedTimeWithDiff(dateFormat, report.getStartTime(),0) + "<br/></td>");
-         out.println("<td>" + StringUtils.getFormattedTimeWithDiff(dateFormat, 
-             report.getFinishTime(), report.getStartTime()) + "<br/></td>");
-         String[] diagnostics = report.getDiagnostics();
-         out.print("<td><pre>");
-         for (int j = 0; j < diagnostics.length ; j++) {
-             out.println(diagnostics[j]);
-         }
-         out.println("</pre><br/></td>");
-         out.println("<td>" + 
-             "<a href=\"taskstats.jsp?jobid=" + jobid + 
-             "&tipid=" + report.getTaskID() +
-             "\">" + report.getCounters().size() +
-             "</a></td></tr>");
-    }
-    out.print("</table>");
-    out.print("</center>");
-  }
-  if (end_index < report_len) {
-    out.print("<div style=\"text-align:right\">" + 
-              "<a href=\"jobtasks.jsp?jobid="+ jobid + "&type=" + type +
-              "&pagenum=" + next_page + "&state=" + state +
-              "\">" + "Next" + "</a></div>");
-  }
-  if (start_index != 0) {
-      out.print("<div style=\"text-align:right\">" + 
-                "<a href=\"jobtasks.jsp?jobid="+ jobid + "&type=" + type +
-                "&pagenum=" + (pnum -1) + "&state=" + state + "\">" + "Prev" + "</a></div>");
-  }
-%>
-
-<hr>
-<a href="jobtracker.jsp">Go back to JobTracker</a><br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>
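
Before paging, the deleted jobtasks.jsp narrowed the TaskReport array by the requested state. A minimal sketch of that matching rule, with a stand-in Status enum in place of TIPStatus; all names below are illustrative.

// Standalone sketch of the state filter jobtasks.jsp applied before paging.
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class TaskStateFilter {
  enum Status { COMPLETE, RUNNING, KILLED, PENDING }

  // "all" keeps everything; otherwise the state name selects one status.
  static boolean matches(String state, Status s) {
    if ("all".equals(state))       return true;
    if ("completed".equals(state)) return s == Status.COMPLETE;
    if ("running".equals(state))   return s == Status.RUNNING;
    if ("killed".equals(state))    return s == Status.KILLED;
    if ("pending".equals(state))   return s == Status.PENDING;
    return false;
  }

  static List<Status> filter(List<Status> reports, String state) {
    List<Status> kept = new ArrayList<Status>();
    for (Status s : reports) {
      if (matches(state, s)) {
        kept.add(s);
      }
    }
    return kept;
  }

  public static void main(String[] args) {
    List<Status> all = Arrays.asList(Status.COMPLETE, Status.RUNNING, Status.KILLED);
    System.out.println(filter(all, "running"));    // [RUNNING]
    System.out.println(filter(all, "all").size()); // 3
  }
}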

+ 0 - 88
src/webapps/job/jobtaskshistory.jsp

@@ -1,88 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.SimpleDateFormat"
-  import="org.apache.hadoop.mapred.JobHistory.*"
-%>
-<jsp:include page="loadhistory.jsp">
-  <jsp:param name="jobid" value="<%=request.getParameter("jobid") %>"/>
-  <jsp:param name="logFile" value="<%=request.getParameter("logFile") %>"/>
-</jsp:include>
-<%!	
-  private static SimpleDateFormat dateFormat =
-                                    new SimpleDateFormat("d/MM HH:mm:ss") ; 
-%>
-
-<%	
-  String jobid = request.getParameter("jobid");
-  String logFile = request.getParameter("logFile");
-  String encodedLogFileName = JobHistory.JobInfo.encodeJobHistoryFilePath(logFile);
-  String taskStatus = request.getParameter("status"); 
-  String taskType = request.getParameter("taskType"); 
-  
-  JobHistory.JobInfo job = (JobHistory.JobInfo)request.
-                            getSession().getAttribute("job");
-  Map<String, JobHistory.Task> tasks = job.getAllTasks(); 
-%>
-<html>
-<body>
-<h2><%=taskStatus%> <%=taskType %> task list for <a href="jobdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>"><%=jobid %> </a></h2>
-<center>
-<table border="2" cellpadding="5" cellspacing="2">
-<tr><td>Task Id</td><td>Start Time</td><td>Finish Time<br/></td><td>Error</td></tr>
-<%
-  for (JobHistory.Task task : tasks.values()) {
-    if (taskType.equals(task.get(Keys.TASK_TYPE))){
-      Map <String, TaskAttempt> taskAttempts = task.getTaskAttempts();
-      for (JobHistory.TaskAttempt taskAttempt : taskAttempts.values()) {
-        if (taskStatus.equals(taskAttempt.get(Keys.TASK_STATUS)) || 
-          taskStatus.equals("all")){
-          printTask(jobid, encodedLogFileName, taskAttempt, out); 
-        }
-      }
-    }
-  }
-%>
-</table>
-<%!
-  private void printTask(String jobid, String logFile,
-    JobHistory.TaskAttempt attempt, JspWriter out) throws IOException{
-    out.print("<tr>"); 
-    out.print("<td>" + "<a href=\"taskdetailshistory.jsp?jobid=" + jobid + 
-          "&logFile="+ logFile +"&taskid="+attempt.get(Keys.TASKID)+"\">" +
-          attempt.get(Keys.TASKID) + "</a></td>");
-    out.print("<td>" + StringUtils.getFormattedTimeWithDiff(dateFormat, 
-          attempt.getLong(Keys.START_TIME), 0 ) + "</td>");
-    out.print("<td>" + StringUtils.getFormattedTimeWithDiff(dateFormat, 
-          attempt.getLong(Keys.FINISH_TIME),
-          attempt.getLong(Keys.START_TIME) ) + "</td>");
-    out.print("<td>" + attempt.get(Keys.ERROR) + "</td>");
-    out.print("</tr>"); 
-  }
-%>
-</center>
-</body>
-</html>

+ 0 - 173
src/webapps/job/jobtracker.jsp

@@ -1,173 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.text.DecimalFormat"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  ClusterStatus status = tracker.getClusterStatus();
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-  JobQueueInfo[] queues = tracker.getQueues();
-  Vector<JobInProgress> runningJobs = tracker.runningJobs();
-  Vector<JobInProgress> completedJobs = tracker.completedJobs();
-  Vector<JobInProgress> failedJobs = tracker.failedJobs();
-%>
-<%!
-  private static DecimalFormat percentFormat = new DecimalFormat("##0.00");
-  
-  public void generateSummaryTable(JspWriter out, ClusterStatus status,
-                                   JobTracker tracker) throws IOException {
-    String tasksPerNode = status.getTaskTrackers() > 0 ?
-      percentFormat.format(((double)(status.getMaxMapTasks() +
-                      status.getMaxReduceTasks())) / status.getTaskTrackers()):
-      "-";
-    out.print("<table border=\"1\" cellpadding=\"5\" cellspacing=\"0\">\n"+
-              "<tr><th>Maps</th><th>Reduces</th>" + 
-              "<th>Total Submissions</th>" +
-              "<th>Nodes</th><th>Map Task Capacity</th>" +
-              "<th>Reduce Task Capacity</th><th>Avg. Tasks/Node</th>" + 
-              "<th>Blacklisted Nodes</th>" +
-              "<th>Excluded Nodes</th></tr>\n");
-    out.print("<tr><td>" + status.getMapTasks() + "</td><td>" +
-              status.getReduceTasks() + "</td><td>" + 
-              tracker.getTotalSubmissions() +
-              "</td><td><a href=\"machines.jsp?type=active\">" +
-              status.getTaskTrackers() +
-              "</a></td><td>" + status.getMaxMapTasks() +
-              "</td><td>" + status.getMaxReduceTasks() +
-              "</td><td>" + tasksPerNode +
-              "</td><td><a href=\"machines.jsp?type=blacklisted\">" +
-              status.getBlacklistedTrackers() + "</a>" +
-              "</td><td><a href=\"machines.jsp?type=excluded\">" +
-              status.getNumExcludedNodes() + "</a>" +
-              "</td></tr></table>\n");
-
-    out.print("<br>");
-    if (tracker.hasRestarted()) {
-      out.print("<span class=\"small\"><i>");
-      if (tracker.hasRecovered()) {
-        out.print("The JobTracker got restarted and recovered back in " );
-        out.print(StringUtils.formatTime(tracker.getRecoveryDuration()));
-      } else {
-        out.print("The JobTracker got restarted and is still recovering");
-      }
-      out.print("</i></span>");
-    }
-  }%>
-
-
-<html>
-<head>
-<title><%= trackerName %> Hadoop Map/Reduce Administration</title>
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<script type="text/javascript" src="/static/jobtracker.js"></script>
-</head>
-<body>
-
-<% JSPUtil.processButtons(request, response, tracker); %>
-
-<h1><%= trackerName %> Hadoop Map/Reduce Administration</h1>
-
-<div id="quicklinks">
-  <a href="#quicklinks" onclick="toggle('quicklinks-list'); return false;">Quick Links</a>
-  <ul id="quicklinks-list">
-    <li><a href="#scheduling_info">Scheduling Info</a></li>
-    <li><a href="#running_jobs">Running Jobs</a></li>
-    <li><a href="#completed_jobs">Completed Jobs</a></li>
-    <li><a href="#failed_jobs">Failed Jobs</a></li>
-    <li><a href="#local_logs">Local Logs</a></li>
-  </ul>
-</div>
-
-<b>State:</b> <%= status.getJobTrackerState() %><br>
-<b>Started:</b> <%= new Date(tracker.getStartTime())%><br>
-<b>Version:</b> <%= VersionInfo.getVersion()%>,
-                <%= VersionInfo.getRevision()%><br>
-<b>Compiled:</b> <%= VersionInfo.getDate()%> by 
-                 <%= VersionInfo.getUser()%> from
-                 <%= VersionInfo.getBranch()%><br>
-<b>Identifier:</b> <%= tracker.getTrackerIdentifier()%><br>                 
-                   
-<hr>
-<h2>Cluster Summary (Heap Size is <%= StringUtils.byteDesc(status.getUsedMemory()) %>/<%= StringUtils.byteDesc(status.getMaxMemory()) %>)</h2>
-<% 
- generateSummaryTable(out, status, tracker); 
-%>
-<hr>
-<h2 id="scheduling_info">Scheduling Information</h2>
-<table border="2" cellpadding="5" cellspacing="2">
-<thead style="font-weight: bold">
-<tr>
-<td> Queue Name </td>
-<td> Scheduling Information</td>
-</tr>
-</thead>
-<tbody>
-<%
-for(JobQueueInfo queue: queues) {
-  String queueName = queue.getQueueName();
-  String schedulingInformation = queue.getSchedulingInfo();
-  if(schedulingInformation == null || schedulingInformation.trim().equals("")) {
-    schedulingInformation = "NA";
-  }
-%>
-<tr>
-<td><a href="jobqueue_details.jsp?queueName=<%=queueName%>"><%=queueName%></a></td>
-<td><%=schedulingInformation.replaceAll("\n","<br/>") %>
-</td>
-</tr>
-<%
-}
-%>
-</tbody>
-</table>
-<hr/>
-<b>Filter (Jobid, Priority, User, Name)</b> <input type="text" id="filter" onkeyup="applyfilter()"> <br>
-<span class="small">Example: 'user:smith 3200' will filter by 'smith' only in the user field and '3200' in all fields</span>
-<hr>
-
-<h2 id="running_jobs">Running Jobs</h2>
-<%=JSPUtil.generateJobTable("Running", runningJobs, 30, 0)%>
-<hr>
-
-<h2 id="completed_jobs">Completed Jobs</h2>
-<%=JSPUtil.generateJobTable("Completed", completedJobs, 0, runningJobs.size())%>
-<hr>
-
-<h2 id="failed_jobs">Failed Jobs</h2>
-<%=JSPUtil.generateJobTable("Failed", failedJobs, 0, 
-    (runningJobs.size()+completedJobs.size()))%>
-<hr>
-
-<h2 id="local_logs">Local Logs</h2>
-<a href="logs/">Log</a> directory, <a href="jobhistory.jsp">
-Job Tracker History</a>
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>
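
The "Avg. Tasks/Node" cell in the summary table above divides total map and reduce capacity by the tracker count, printing "-" when no trackers have joined. A small sketch of just that cell, using the same ##0.00 DecimalFormat pattern; the class and method names are invented for illustration.

// Sketch of the Avg. Tasks/Node computation from the removed jobtracker.jsp.
import java.text.DecimalFormat;

public class ClusterSummaryCell {
  private static final DecimalFormat PERCENT = new DecimalFormat("##0.00");

  static String tasksPerNode(int maxMaps, int maxReduces, int trackers) {
    return trackers > 0
        ? PERCENT.format(((double) (maxMaps + maxReduces)) / trackers)
        : "-";                      // no trackers: nothing to average over
  }

  public static void main(String[] args) {
    System.out.println(tasksPerNode(200, 100, 40)); // 7.50
    System.out.println(tasksPerNode(200, 100, 0));  // -
  }
}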

+ 0 - 68
src/webapps/job/loadhistory.jsp

@@ -1,68 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.fs.*"
-  import="org.apache.hadoop.util.*"
-  import="javax.servlet.jsp.*"
-  import="java.text.SimpleDateFormat"  
-  import="org.apache.hadoop.mapred.JobHistory.*"
-%>
-<%
-    PathFilter jobLogFileFilter = new PathFilter() {
-      public boolean accept(Path path) {
-        return !(path.getName().endsWith(".xml"));
-      }
-    };
-    
-    FileSystem fs = (FileSystem) application.getAttribute("fileSys");
-    String jobId =  (String)request.getParameter("jobid");
-    JobHistory.JobInfo job = (JobHistory.JobInfo)
-                               request.getSession().getAttribute("job");
-    // if a JobInfo session attribute exists but belongs to a different
-    // job, remove the attribute; if the cached job has not yet finished,
-    // remove it as well so that it gets refreshed.
-    boolean isJobComplete = false;
-    if (null != job) {
-      String jobStatus = job.get(Keys.JOB_STATUS);
-      isJobComplete = Values.SUCCESS.name().equals(jobStatus)
-                      || Values.FAILED.name().equals(jobStatus)
-                      || Values.KILLED.name().equals(jobStatus);
-    }
-    if (null != job && 
-       (!jobId.equals(job.get(Keys.JOBID)) 
-         || !isJobComplete)) {
-      // remove jobInfo from session, keep only one job in session at a time
-      request.getSession().removeAttribute("job"); 
-      job = null ; 
-    }
-	
-    if (null == job) {
-      String jobLogFile = (String)request.getParameter("logFile");
-      job = new JobHistory.JobInfo(jobId); 
-      DefaultJobHistoryParser.parseJobTasks(jobLogFile, job, fs) ; 
-      request.getSession().setAttribute("job", job);
-      request.getSession().setAttribute("fs", fs);
-    }
-%>
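
The session logic deleted above keeps at most one parsed job history per HTTP session and re-parses when the requested job id differs or the cached job had not reached a terminal state. A minimal sketch of that rule, assuming plain strings in place of JobHistory.JobInfo and its Keys/Values enums; all names are illustrative.

// Sketch of the session-caching rule the removed loadhistory.jsp implemented.
import java.util.Arrays;
import java.util.List;

public class HistoryCacheRule {
  static final List<String> TERMINAL = Arrays.asList("SUCCESS", "FAILED", "KILLED");

  static boolean mustReload(String requestedJobId,
                            String cachedJobId,   // null if nothing cached
                            String cachedStatus) {
    if (cachedJobId == null) {
      return true;                                // nothing in the session
    }
    boolean complete = TERMINAL.contains(cachedStatus);
    // a different job, or one that may still be changing: re-parse the log
    return !requestedJobId.equals(cachedJobId) || !complete;
  }

  public static void main(String[] args) {
    System.out.println(mustReload("job_1", "job_1", "SUCCESS")); // false: reuse
    System.out.println(mustReload("job_2", "job_1", "SUCCESS")); // true: other job
    System.out.println(mustReload("job_1", "job_1", "RUNNING")); // true: not final
  }
}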

+ 0 - 138
src/webapps/job/machines.jsp

@@ -1,138 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.text.DecimalFormat"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-  String type = request.getParameter("type");
-%>
-<%!
-  public void generateTaskTrackerTable(JspWriter out,
-                                       String type,
-                                       JobTracker tracker) throws IOException {
-    Collection c;
-    if (("blacklisted").equals(type)) {
-      out.println("<h2>Blacklisted Task Trackers</h2>");
-      c = tracker.blacklistedTaskTrackers();
-    } else if (("active").equals(type)) {
-      out.println("<h2>Active Task Trackers</h2>");
-      c = tracker.activeTaskTrackers();
-    } else {
-      out.println("<h2>Task Trackers</h2>");
-      c = tracker.taskTrackers();
-    }
-    if (c.size() == 0) {
-      out.print("There are currently no known " + type + " Task Trackers.");
-    } else {
-      out.print("<center>\n");
-      out.print("<table border=\"2\" cellpadding=\"5\" cellspacing=\"2\">\n");
-      out.print("<tr><td align=\"center\" colspan=\"6\"><b>Task Trackers</b></td></tr>\n");
-      out.print("<tr><td><b>Name</b></td><td><b>Host</b></td>" +
-                "<td><b># running tasks</b></td>" +
-                "<td><b>Max Map Tasks</b></td>" +
-                "<td><b>Max Reduce Tasks</b></td>" +
-                "<td><b>Failures</b></td>" +
-                "<td><b>Seconds since heartbeat</b></td></tr>\n");
-      int maxFailures = 0;
-      String failureKing = null;
-      for (Iterator it = c.iterator(); it.hasNext(); ) {
-        TaskTrackerStatus tt = (TaskTrackerStatus) it.next();
-        long sinceHeartbeat = System.currentTimeMillis() - tt.getLastSeen();
-        if (sinceHeartbeat > 0) {
-          sinceHeartbeat = sinceHeartbeat / 1000;
-        }
-        int numCurTasks = 0;
-        for (Iterator it2 = tt.getTaskReports().iterator(); it2.hasNext(); ) {
-          it2.next();
-          numCurTasks++;
-        }
-        int numFailures = tt.getFailures();
-        if (numFailures > maxFailures) {
-          maxFailures = numFailures;
-          failureKing = tt.getTrackerName();
-        }
-        out.print("<tr><td><a href=\"http://");
-        out.print(tt.getHost() + ":" + tt.getHttpPort() + "/\">");
-        out.print(tt.getTrackerName() + "</a></td><td>");
-        out.print(tt.getHost() + "</td><td>" + numCurTasks +
-                  "</td><td>" + tt.getMaxMapTasks() +
-                  "</td><td>" + tt.getMaxReduceTasks() + 
-                  "</td><td>" + numFailures + 
-                  "</td><td>" + sinceHeartbeat + "</td></tr>\n");
-      }
-      out.print("</table>\n");
-      out.print("</center>\n");
-      if (maxFailures > 0) {
-        out.print("Highest Failures: " + failureKing + " with " + maxFailures + 
-                  " failures<br>\n");
-      }
-    }
-  }
-
-  public void generateTableForExcludedNodes(JspWriter out, JobTracker tracker) 
-  throws IOException {
-    // excluded nodes
-    out.println("<h2>Excluded Nodes</h2>");
-    Collection<String> d = tracker.getExcludedNodes();
-    if (d.size() == 0) {
-      out.print("There are currently no excluded hosts.");
-    } else { 
-      out.print("<center>\n");
-      out.print("<table border=\"2\" cellpadding=\"5\" cellspacing=\"2\">\n");
-      out.print("<tr>");
-      out.print("<td><b>Host Name</b></td></tr>\n");
-      for (Iterator it = d.iterator(); it.hasNext(); ) {
-        String dt = (String)it.next();
-        out.print("<td>" + dt + "</td></tr>\n");
-      }
-      out.print("</table>\n");
-      out.print("</center>\n");
-    }
-  }
-%>
-
-<html>
-
-<title><%=trackerName%> Hadoop Machine List</title>
-
-<body>
-<h1><a href="jobtracker.jsp"><%=trackerName%></a> Hadoop Machine List</h1>
-
-<%
-  if (("excluded").equals(type)) {
-    generateTableForExcludedNodes(out, tracker);
-  } else {
-    generateTaskTrackerTable(out, type, tracker);
-  }
-%>
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 292
src/webapps/job/taskdetails.jsp

@@ -1,292 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.lang.String"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.SimpleDateFormat"  
-  import="org.apache.hadoop.util.*"
-%>
-<%!static SimpleDateFormat dateFormat = new SimpleDateFormat(
-      "d-MMM-yyyy HH:mm:ss");
-
-  private static final String PRIVATE_ACTIONS_KEY = "webinterface.private.actions";%>
-<%!private void printConfirm(JspWriter out, String jobid, String tipid,
-      String taskid, String action) throws IOException {
-    String url = "taskdetails.jsp?jobid=" + jobid + "&tipid=" + tipid
-        + "&taskid=" + taskid;
-    out.print("<html><head><META http-equiv=\"refresh\" content=\"15;URL="
-        + url + "\"></head>" + "<body><h3> Are you sure you want to kill/fail "
-        + taskid + " ?<h3><br><table border=\"0\"><tr><td width=\"100\">"
-        + "<form action=\"" + url + "\" method=\"post\">"
-        + "<input type=\"hidden\" name=\"action\" value=\"" + action + "\" />"
-        + "<input type=\"submit\" name=\"Kill/Fail\" value=\"Kill/Fail\" />"
-        + "</form>"
-        + "</td><td width=\"100\"><form method=\"post\" action=\"" + url
-        + "\"><input type=\"submit\" value=\"Cancel\" name=\"Cancel\""
-        + "/></form></td></tr></table></body></html>");
-  }%>
-<%
-    JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-    String jobid = request.getParameter("jobid");
-    String tipid = request.getParameter("tipid");
-    String taskid = request.getParameter("taskid");
-    JobID jobidObj = JobID.forName(jobid);
-    TaskID tipidObj = TaskID.forName(tipid);
-    TaskAttemptID taskidObj = TaskAttemptID.forName(taskid);
-    
-    JobInProgress job = (JobInProgress) tracker.getJob(jobidObj);
-    
-    boolean privateActions = JSPUtil.conf.getBoolean(PRIVATE_ACTIONS_KEY,
-        false);
-    if (privateActions) {
-      String action = request.getParameter("action");
-      if (action != null) {
-        if (action.equalsIgnoreCase("confirm")) {
-          String subAction = request.getParameter("subaction");
-          if (subAction == null)
-            subAction = "fail-task";
-          printConfirm(out, jobid, tipid, taskid, subAction);
-          return;
-        }
-        else if (action.equalsIgnoreCase("kill-task") 
-            && request.getMethod().equalsIgnoreCase("POST")) {
-          tracker.killTask(taskidObj, false);
-          //redirect again so that refreshing the page will not attempt to rekill the task
-          response.sendRedirect("/taskdetails.jsp?" + "&subaction=kill-task"
-              + "&jobid=" + jobid + "&tipid=" + tipid);
-        }
-        else if (action.equalsIgnoreCase("fail-task")
-            && request.getMethod().equalsIgnoreCase("POST")) {
-          tracker.killTask(taskidObj, true);
-          response.sendRedirect("/taskdetails.jsp?" + "&subaction=fail-task"
-              + "&jobid=" + jobid + "&tipid=" + tipid);
-        }
-      }
-    }
-    TaskInProgress tip = null;
-    if (job != null && tipidObj != null) {
-      tip = job.getTaskInProgress(tipidObj);
-    }
-    TaskStatus[] ts = null;
-    if (tip != null) { 
-      ts = tip.getTaskStatuses();
-    }
-    boolean isCleanupOrSetup = false;
-    if ( tip != null) {
-      isCleanupOrSetup = tip.isJobCleanupTask();
-      if (!isCleanupOrSetup) {
-        isCleanupOrSetup = tip.isJobSetupTask();
-      }
-    }
-%>
-
-
-<html>
-<head>
-  <link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-  <title>Hadoop Task Details</title>
-</head>
-<body>
-<h1>Job <a href="/jobdetails.jsp?jobid=<%=jobid%>"><%=jobid%></a></h1>
-
-<hr>
-
-<h2>All Task Attempts</h2>
-<center>
-<%
-    if (ts == null || ts.length == 0) {
-%>
-		<h3>No Task Attempts found</h3>
-<%
-    } else {
-%>
-<table border=2 cellpadding="5" cellspacing="2">
-<tr><td align="center">Task Attempts</td><td>Machine</td><td>Status</td><td>Progress</td><td>Start Time</td> 
-  <%
-   if (!ts[0].getIsMap() && !isCleanupOrSetup) {
-   %>
-<td>Shuffle Finished</td><td>Sort Finished</td>
-  <%
-  }
-  %>
-<td>Finish Time</td><td>Errors</td><td>Task Logs</td><td>Counters</td><td>Actions</td></tr>
-  <%
-    for (int i = 0; i < ts.length; i++) {
-      TaskStatus status = ts[i];
-      String taskTrackerName = status.getTaskTracker();
-      TaskTrackerStatus taskTracker = tracker.getTaskTracker(taskTrackerName);
-      out.print("<tr><td>" + status.getTaskID() + "</td>");
-      String taskAttemptTracker = null;
-      String cleanupTrackerName = null;
-      TaskTrackerStatus cleanupTracker = null;
-      String cleanupAttemptTracker = null;
-      boolean hasCleanupAttempt = false;
-      if (tip != null && tip.isCleanupAttempt(status.getTaskID())) {
-        cleanupTrackerName = tip.machineWhereCleanupRan(status.getTaskID());
-        cleanupTracker = tracker.getTaskTracker(cleanupTrackerName);
-        if (cleanupTracker != null) {
-          cleanupAttemptTracker = "http://" + cleanupTracker.getHost() + ":"
-            + cleanupTracker.getHttpPort();
-        }
-        hasCleanupAttempt = true;
-      }
-      out.print("<td>");
-      if (hasCleanupAttempt) {
-        out.print("Task attempt: ");
-      }
-      if (taskTracker == null) {
-        out.print(taskTrackerName);
-      } else {
-        taskAttemptTracker = "http://" + taskTracker.getHost() + ":"
-          + taskTracker.getHttpPort();
-        out.print("<a href=\"" + taskAttemptTracker + "\">"
-          + tracker.getNode(taskTracker.getHost()) + "</a>");
-      }
-      if (hasCleanupAttempt) {
-        out.print("<br/>Cleanup Attempt: ");
-        if (cleanupAttemptTracker == null ) {
-          out.print(cleanupTrackerName);
-        } else {
-          out.print("<a href=\"" + cleanupAttemptTracker + "\">"
-            + tracker.getNode(cleanupTracker.getHost()) + "</a>");
-        }
-      }
-      out.print("</td>");
-        out.print("<td>" + status.getRunState() + "</td>");
-        out.print("<td>" + StringUtils.formatPercent(status.getProgress(), 2)
-          + ServletUtil.percentageGraph(status.getProgress() * 100f, 80) + "</td>");
-        out.print("<td>"
-          + StringUtils.getFormattedTimeWithDiff(dateFormat, status
-          .getStartTime(), 0) + "</td>");
-        if (!ts[i].getIsMap() && !isCleanupOrSetup) {
-          out.print("<td>"
-          + StringUtils.getFormattedTimeWithDiff(dateFormat, status
-          .getShuffleFinishTime(), status.getStartTime()) + "</td>");
-        out.println("<td>"
-          + StringUtils.getFormattedTimeWithDiff(dateFormat, status
-          .getSortFinishTime(), status.getShuffleFinishTime())
-          + "</td>");
-        }
-        out.println("<td>"
-          + StringUtils.getFormattedTimeWithDiff(dateFormat, status
-          .getFinishTime(), status.getStartTime()) + "</td>");
-
-        out.print("<td><pre>");
-        String [] failures = tracker.getTaskDiagnostics(status.getTaskID());
-        if (failures == null) {
-          out.print("&nbsp;");
-        } else {
-          for(int j = 0 ; j < failures.length ; j++){
-            out.print(failures[j]);
-            if (j < (failures.length - 1)) {
-              out.print("\n-------\n");
-            }
-          }
-        }
-        out.print("</pre></td>");
-        out.print("<td>");
-        String taskLogUrl = null;
-        if (taskTracker != null) {
-          taskLogUrl = TaskLogServlet.getTaskLogUrl(taskTracker.getHost(),
-              String.valueOf(taskTracker.getHttpPort()),
-              status.getTaskID().toString());
-        }
-        if (hasCleanupAttempt) {
-          out.print("Task attempt: <br/>");
-        }
-        if (taskLogUrl == null) {
-          out.print("n/a");
-        } else {
-          String tailFourKBUrl = taskLogUrl + "&start=-4097";
-          String tailEightKBUrl = taskLogUrl + "&start=-8193";
-          String entireLogUrl = taskLogUrl + "&all=true";
-          out.print("<a href=\"" + tailFourKBUrl + "\">Last 4KB</a><br/>");
-          out.print("<a href=\"" + tailEightKBUrl + "\">Last 8KB</a><br/>");
-          out.print("<a href=\"" + entireLogUrl + "\">All</a><br/>");
-        }
-        if (hasCleanupAttempt) {
-          out.print("Cleanup attempt: <br/>");
-          taskLogUrl = null;
-          if (cleanupTracker != null) {
-            taskLogUrl = TaskLogServlet.getTaskLogUrl(cleanupTracker.getHost(),
-                String.valueOf(cleanupTracker.getHttpPort()),
-                status.getTaskID().toString());
-          }
-          if (taskLogUrl == null) {
-            out.print("n/a");
-          } else {
-            String tailFourKBUrl = taskLogUrl + "&start=-4097&cleanup=true";
-            String tailEightKBUrl = taskLogUrl + "&start=-8193&cleanup=true";
-            String entireLogUrl = taskLogUrl + "&all=true&cleanup=true";
-            out.print("<a href=\"" + tailFourKBUrl + "\">Last 4KB</a><br/>");
-            out.print("<a href=\"" + tailEightKBUrl + "\">Last 8KB</a><br/>");
-            out.print("<a href=\"" + entireLogUrl + "\">All</a><br/>");
-          }
-        }
-        out.print("</td><td>" + "<a href=\"/taskstats.jsp?jobid=" + jobid
-          + "&tipid=" + tipid + "&taskid=" + status.getTaskID() + "\">"
-          + ((status.getCounters() != null) ? status.getCounters().size() : 0) + "</a></td>");
-        out.print("<td>");
-        if (privateActions
-          && status.getRunState() == TaskStatus.State.RUNNING) {
-        out.print("<a href=\"/taskdetails.jsp?action=confirm"
-          + "&subaction=kill-task" + "&jobid=" + jobid + "&tipid="
-          + tipid + "&taskid=" + status.getTaskID() + "\" > Kill </a>");
-        out.print("<br><a href=\"/taskdetails.jsp?action=confirm"
-          + "&subaction=fail-task" + "&jobid=" + jobid + "&tipid="
-          + tipid + "&taskid=" + status.getTaskID() + "\" > Fail </a>");
-        }
-        else
-          out.print("<pre>&nbsp;</pre>");
-        out.println("</td></tr>");
-      }
-  %>
-</table>
-</center>
-
-<%
-      if (ts[0].getIsMap()) {
-%>
-<h3>Input Split Locations</h3>
-<table border=2 cellpadding="5" cellspacing="2">
-<%
-        for (String split: StringUtils.split(tracker.getTip(
-                                         tipidObj).getSplitNodes())) {
-          out.println("<tr><td>" + split + "</td></tr>");
-        }
-%>
-</table>
-<%    
-      }
-    }
-%>
-
-<hr>
-<a href="jobdetails.jsp?jobid=<%=jobid%>">Go back to the job</a><br>
-<a href="jobtracker.jsp">Go back to JobTracker</a><br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>
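
The kill/fail handling deleted above follows a confirm-then-POST-then-redirect shape: a GET with action=confirm only renders a confirmation form, a POST performs the actual kill or fail, and the redirect afterwards keeps a browser refresh from repeating the action (the Post/Redirect/Get pattern). A minimal sketch of just that decision, with invented enum and method names.

// Sketch of the action flow the removed taskdetails.jsp used for kill/fail.
public class TaskActionFlow {
  enum Outcome { SHOW_CONFIRM_PAGE, KILL_TASK, FAIL_TASK, IGNORE }

  static Outcome decide(String action, String method, boolean privateActionsEnabled) {
    if (!privateActionsEnabled || action == null) {
      return Outcome.IGNORE;              // webinterface.private.actions is off
    }
    if (action.equalsIgnoreCase("confirm")) {
      return Outcome.SHOW_CONFIRM_PAGE;   // GET is fine here: nothing mutates
    }
    boolean isPost = "POST".equalsIgnoreCase(method);
    if (action.equalsIgnoreCase("kill-task") && isPost) {
      return Outcome.KILL_TASK;           // then redirect, so refresh is safe
    }
    if (action.equalsIgnoreCase("fail-task") && isPost) {
      return Outcome.FAIL_TASK;
    }
    return Outcome.IGNORE;
  }

  public static void main(String[] args) {
    System.out.println(decide("kill-task", "GET", true));   // IGNORE
    System.out.println(decide("kill-task", "POST", true));  // KILL_TASK
  }
}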

+ 0 - 125
src/webapps/job/taskdetailshistory.jsp

@@ -1,125 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.SimpleDateFormat"
-  import="org.apache.hadoop.mapred.JobHistory.*"
-%>
-<jsp:include page="loadhistory.jsp">
-  <jsp:param name="jobid" value="<%=request.getParameter("jobid") %>"/>
-  <jsp:param name="jobTrackerId" value="<%=request.getParameter("jobTrackerId") %>"/>
-</jsp:include>
-<%!	private static SimpleDateFormat dateFormat = new SimpleDateFormat("d/MM HH:mm:ss") ; %>
-
-<%	
-  String jobid = request.getParameter("jobid");
-  String logFile = request.getParameter("logFile");
-  String encodedLogFileName = JobHistory.JobInfo.encodeJobHistoryFilePath(logFile);
-  String taskid = request.getParameter("taskid"); 
-  JobHistory.JobInfo job = (JobHistory.JobInfo)
-                              request.getSession().getAttribute("job");
-  JobHistory.Task task = job.getAllTasks().get(taskid); 
-  String type = task.get(Keys.TASK_TYPE);
-%>
-<html>
-<body>
-<h2><%=taskid %> attempts for <a href="jobdetailshistory.jsp?jobid=<%=jobid%>&logFile=<%=encodedLogFileName%>"> <%=jobid %> </a></h2>
-<center>
-<table border="2" cellpadding="5" cellspacing="2">
-<tr><td>Task Id</td><td>Start Time</td>
-<%	
-  if (Values.REDUCE.name().equals(type)) {
-%>
-    <td>Shuffle Finished</td><td>Sort Finished</td>
-<%
-  }
-%>
-<td>Finish Time</td><td>Host</td><td>Error</td><td>Task Logs</td></tr>
-<%
-  for (JobHistory.TaskAttempt attempt : task.getTaskAttempts().values()) {
-    printTaskAttempt(attempt, type, out);
-  }
-%>
-</table>
-</center>
-<%	
-  if (Values.MAP.name().equals(type)) {
-%>
-<h3>Input Split Locations</h3>
-<table border="2" cellpadding="5" cellspacing="2">
-<%
-    for (String split : StringUtils.split(task.get(Keys.SPLITS)))
-    {
-      out.println("<tr><td>" + split + "</td></tr>");
-    }
-%>
-</table>    
-<%
-  }
-%>
-<%!
-  private void printTaskAttempt(JobHistory.TaskAttempt taskAttempt,
-                                String type, JspWriter out) 
-  throws IOException {
-    out.print("<tr>"); 
-    out.print("<td>" + taskAttempt.get(Keys.TASK_ATTEMPT_ID) + "</td>");
-    out.print("<td>" + StringUtils.getFormattedTimeWithDiff(dateFormat,
-              taskAttempt.getLong(Keys.START_TIME), 0 ) + "</td>"); 
-    if (Values.REDUCE.name().equals(type)) {
-      JobHistory.ReduceAttempt reduceAttempt = 
-            (JobHistory.ReduceAttempt)taskAttempt; 
-      out.print("<td>" + 
-                StringUtils.getFormattedTimeWithDiff(dateFormat, 
-                reduceAttempt.getLong(Keys.SHUFFLE_FINISHED), 
-                reduceAttempt.getLong(Keys.START_TIME)) + "</td>"); 
-      out.print("<td>" + StringUtils.getFormattedTimeWithDiff(dateFormat, 
-                reduceAttempt.getLong(Keys.SORT_FINISHED), 
-                reduceAttempt.getLong(Keys.SHUFFLE_FINISHED)) + "</td>"); 
-    }
-    out.print("<td>"+ StringUtils.getFormattedTimeWithDiff(dateFormat,
-              taskAttempt.getLong(Keys.FINISH_TIME), 
-              taskAttempt.getLong(Keys.START_TIME) ) + "</td>"); 
-    out.print("<td>" + taskAttempt.get(Keys.HOSTNAME) + "</td>");
-    out.print("<td>" + taskAttempt.get(Keys.ERROR) + "</td>");
-
-    // Print task log urls
-    out.print("<td>");	
-    String taskLogsUrl = JobHistory.getTaskLogsUrl(taskAttempt);
-    if (taskLogsUrl != null) {
-      String tailFourKBUrl = taskLogsUrl + "&start=-4097";
-      String tailEightKBUrl = taskLogsUrl + "&start=-8193";
-      String entireLogUrl = taskLogsUrl + "&all=true";
-      out.print("<a href=\"" + tailFourKBUrl + "\">Last 4KB</a><br/>");
-      out.print("<a href=\"" + tailEightKBUrl + "\">Last 8KB</a><br/>");
-      out.print("<a href=\"" + entireLogUrl + "\">All</a><br/>");
-    } else {
-      out.print("n/a");
-    }
-    out.print("</td>");
-    out.print("</tr>"); 
-  }
-%>
-</body>
-</html>

+ 0 - 106
src/webapps/job/taskstats.jsp

@@ -1,106 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.lang.String"
-  import="java.text.*"
-  import="java.util.*"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-  import="java.text.SimpleDateFormat"  
-%>
-<%
-  JobTracker tracker = (JobTracker) application.getAttribute("job.tracker");
-  String trackerName = 
-           StringUtils.simpleHostname(tracker.getJobTrackerMachine());
-  String jobid = request.getParameter("jobid");
-  String tipid = request.getParameter("tipid");
-  String taskid = request.getParameter("taskid");
-  JobID jobidObj = JobID.forName(jobid);
-  TaskID tipidObj = TaskID.forName(tipid);
-  TaskAttemptID taskidObj = TaskAttemptID.forName(taskid);
-  
-  JobInProgress job = (JobInProgress) tracker.getJob(jobidObj);
-  
-  Format decimal = new DecimalFormat();
-  Counters counters;
-  if (taskid == null) {
-    counters = tracker.getTipCounters(tipidObj);
-    taskid = tipid; // for page title etc
-  }
-  else {
-    TaskStatus taskStatus = tracker.getTaskStatus(taskidObj);
-    counters = taskStatus.getCounters();
-  }
-%>
-
-<html>
-  <head>
-    <title>Counters for <%=taskid%></title>
-  </head>
-<body>
-<h1>Counters for <%=taskid%></h1>
-
-<hr>
-
-<%
-  if ( counters == null ) {
-%>
-    <h3>No counter information found for this task</h3>
-<%
-  } else {    
-%>
-    <table>
-<%
-      for (String groupName : counters.getGroupNames()) {
-        Counters.Group group = counters.getGroup(groupName);
-        String displayGroupName = group.getDisplayName();
-%>
-        <tr>
-          <td colspan="3"><br/><b><%=displayGroupName%></b></td>
-        </tr>
-<%
-        for (Counters.Counter counter : group) {
-          String displayCounterName = counter.getDisplayName();
-          long value = counter.getCounter();
-%>
-          <tr>
-            <td width="50"></td>
-            <td><%=displayCounterName%></td>
-            <td align="right"><%=decimal.format(value)%></td>
-          </tr>
-<%
-        }
-      }
-%>
-    </table>
-<%
-  }
-%>
-
-<hr>
-<a href="jobdetails.jsp?jobid=<%=jobid%>">Go back to the job</a><br>
-<a href="jobtracker.jsp">Go back to JobTracker</a><br>
-<%
-out.println(ServletUtil.htmlFooter());
-%>

+ 0 - 29
src/webapps/secondary/index.html

@@ -1,29 +0,0 @@
-<meta HTTP-EQUIV="REFRESH" content="0;url=status.jsp"/>
-<html>
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<head><title>Hadoop Administration</title></head>
-
-<body>
-<h1>Hadoop Administration</h1>
-
-<ul> 
-  <li><a href="status.jsp">Status</a></li> 
-</ul>
-
-</body> 
-</html>

+ 0 - 39
src/webapps/secondary/status.jsp

@@ -1,39 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="org.apache.hadoop.util.*"
-%>
-
-<html>
-<link rel="stylesheet" type="text/css" href="/static/hadoop.css">
-<title>Hadoop SecondaryNameNode</title>
-    
-<body>
-<h1>SecondaryNameNode</h1>
-<%= JspHelper.getVersionTable() %>
-<hr />
-<pre>
-<%= application.getAttribute("secondary.name.node").toString() %>
-</pre>
-
-<br />
-<b><a href="/logs/">Logs</a></b>
-<%= ServletUtil.htmlFooter() %>

BIN
src/webapps/static/hadoop-logo.jpg


+ 0 - 134
src/webapps/static/hadoop.css

@@ -1,134 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-body {
-  background-color : #ffffff;
-  font-family : sans-serif;
-}
-
-.small {
-  font-size : smaller;
-}
-
-div#dfsnodetable tr#row1, div#dfstable td#col1 {
-	font-weight : bolder;
-}
-
-div#dfstable td#col1 {
-	vertical-align : top;
-}
-
-div#dfstable td#col3 {
-	text-align : right;
-}
-
-div#dfsnodetable caption {
-	text-align : left;
-}
-
-div#dfsnodetable a#title {
-	font-size : larger;
-	font-weight : bolder;
-}
-
-div#dfsnodetable td, th {
-	border-bottom-style : none;
-	padding-bottom : 4px;
-	padding-top : 4px;
-}
-
-div#dfsnodetable A:link, A:visited {
-	text-decoration : none;
-}
-
-div#dfsnodetable th.header, th.headerASC, th.headerDSC {
-	padding-bottom : 8px;
-	padding-top : 8px;
-}
-div#dfsnodetable th.header:hover, th.headerASC:hover, th.headerDSC:hover,
-                 td.name:hover {
-	text-decoration : underline;
-	cursor : pointer;
-}
-
-div#dfsnodetable td.blocks, td.size, td.pcused, td.adminstate, td.lastcontact {
-	text-align : right;
-}
-
-div#dfsnodetable .rowNormal .header {
-	background-color : #ffffff;
-}
-div#dfsnodetable .rowAlt, .headerASC, .headerDSC {
-	background-color : lightyellow;
-}
-
-.warning {
-	font-weight : bolder;
-	color : red;
-}
-
-div#dfstable table {
-	white-space : pre;
-}
-
-div#dfsnodetable td, div#dfsnodetable th, div#dfstable td {
-	padding-left : 10px;
-	padding-right : 10px;
-}
-
-td.perc_filled {
-  background-color:#AAAAFF;
-}
-
-td.perc_nonfilled {
-  background-color:#FFFFFF;
-}
-
-line.taskgraphline {
-  stroke-width:1;stroke-linecap:round;  
-}
-
-#quicklinks {
-	margin: 0;
-	padding: 2px 4px;
-	position: fixed;
-	top: 0;
-	right: 0;
-	text-align: right;
-	background-color: #eee;
-	font-weight: bold;
-}
-
-#quicklinks ul {
-	margin: 0;
-	padding: 0;
-	list-style-type: none;
-	font-weight: normal;
-}
-
-#quicklinks ul {
-	display: none;
-}
-
-#quicklinks a {
-	font-size: smaller;
-	text-decoration: none;
-}
-
-#quicklinks ul a {
-	text-decoration: underline;
-}

+ 0 - 18
src/webapps/static/jobconf.xsl

@@ -1,18 +0,0 @@
-<?xml version="1.0"?>
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-<xsl:output method="html"/>
-<xsl:template match="configuration">
-<table border="1" align="center" >
-<tr>
- <th>name</th>
- <th>value</th>
-</tr>
-<xsl:for-each select="property">
-<tr>
-  <td width="35%"><b><xsl:value-of select="name"/></b></td>
-  <td width="65%"><xsl:value-of select="value"/></td>
-</tr>
-</xsl:for-each>
-</table>
-</xsl:template>
-</xsl:stylesheet>

+ 0 - 151
src/webapps/static/jobtracker.js

@@ -1,151 +0,0 @@
-/*
-* Licensed to the Apache Software Foundation (ASF) under one or more
-* contributor license agreements.  See the NOTICE file distributed with
-* this work for additional information regarding copyright ownership.
-* The ASF licenses this file to You under the Apache License, Version 2.0
-* (the "License"); you may not use this file except in compliance with
-* the License.  You may obtain a copy of the License at
-*
-*     http://www.apache.org/licenses/LICENSE-2.0
-*
-* Unless required by applicable law or agreed to in writing, software
-* distributed under the License is distributed on an "AS IS" BASIS,
-* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-* See the License for the specific language governing permissions and
-* limitations under the License.
-*/
-
-function checkButtonVerbage()
-{
-  var inputs = document.getElementsByName("jobCheckBox");
-  var check = getCheckStatus(inputs);
-
-  setCheckButtonVerbage(! check);
-}
-
-function selectAll()
-{
-  var inputs = document.getElementsByName("jobCheckBox");
-  var check = getCheckStatus(inputs);
-
-  for (var i in inputs) {
-    if ('jobCheckBox' == inputs[i].name) {
-      if ( inputs[i].parentNode.parentNode.style.display != 'none') {
-        inputs[i].checked = ! check;
-      }
-    }
-  }
-
-  setCheckButtonVerbage(check);
-}
-
-function getCheckStatus(inputs)
-{
-  var check = true;
-
-  for (var i in inputs) {
-    if ('jobCheckBox' == inputs[i].name) {
-      if ( inputs[i].parentNode.parentNode.style.display != 'none') {
-        check = (inputs[i].checked && check);
-      }
-    }
-  }
-
-  return check;
-}
-
-
-function setCheckButtonVerbage(check)
-{
-  var op = document.getElementById("checkEm");
-  op.value = check ? "Select All" : "Deselect All";
-}
-
-function applyfilter()
-{
-  var cols = ["job","priority","user","name"];
-  var nodes = [];
-  var filters = [];
-
-  for (var i = 0; i < cols.length; ++i) {
-    nodes[i] = document.getElementById(cols[i] + "_0" );
-  }
-
-  var filter = document.getElementById("filter");
-  filters = filter.value.split(' ');
-
-  var row = 0;
-  while ( nodes[0] != null ) {
-    //default display status
-    var display = true;
-
-    // for each filter
-    for (var filter_idx = 0; filter_idx < filters.length; ++filter_idx) {
-
-      // go check each column
-      if ((getDisplayStatus(nodes, filters[filter_idx], cols)) == 0) {
-        display = false;
-        break;
-      }
-    }
-
-    // set the display status
-    nodes[0].parentNode.style.display = display ? '' : 'none';
-
-    // next row
-    ++row;
-
-    // next set of controls
-    for (var i = 0; i < cols.length; ++i) {
-      nodes[i] = document.getElementById(cols[i] + "_" + row);
-    }
-  }  // while
-}
-
-function getDisplayStatus(nodes, filter, cols)
-{
-  var offset = filter.indexOf(':');
-
-  var search = offset != -1 ? filter.substring(offset + 1).toLowerCase() : filter.toLowerCase();
-
-  for (var col = 0; col < cols.length; ++col) {
-    // a column specific filter
-    if (offset != -1) {
-      var searchCol = filter.substring(0, offset).toLowerCase();
-
-      if (searchCol == cols[col]) {
-        // special case jobs to remove unnecessary stuff
-        return containsIgnoreCase(stripHtml(nodes[col].innerHTML), search);
-      }
-    } else if (containsIgnoreCase(stripHtml(nodes[col].innerHTML), filter)) {
-      return true;
-    }
-  }
-
-  return false;
-}
-
-function stripHtml(text)
-{
-  return text.replace(/<[^>]*>/g,'').replace(/&[^;]*;/g,'');
-}
-
-function containsIgnoreCase(haystack, needle)
-{
-  return haystack.toLowerCase().indexOf(needle.toLowerCase()) != -1;
-}
-
-function confirmAction()
-{
-  return confirm("Are you sure?");
-}
-
-function toggle(id)
-{
-  if ( document.getElementById(id).style.display != 'block') {
-    document.getElementById(id).style.display = 'block';
-  }
-  else {
-    document.getElementById(id).style.display = 'none';
-  }
-}

+ 0 - 17
src/webapps/task/index.html

@@ -1,17 +0,0 @@
-<!--
-   Licensed to the Apache Software Foundation (ASF) under one or more
-   contributor license agreements.  See the NOTICE file distributed with
-   this work for additional information regarding copyright ownership.
-   The ASF licenses this file to You under the Apache License, Version 2.0
-   (the "License"); you may not use this file except in compliance with
-   the License.  You may obtain a copy of the License at
-
-       http://www.apache.org/licenses/LICENSE-2.0
-
-   Unless required by applicable law or agreed to in writing, software
-   distributed under the License is distributed on an "AS IS" BASIS,
-   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-   See the License for the specific language governing permissions and
-   limitations under the License.
--->
-<meta HTTP-EQUIV="REFRESH" content="0;url=tasktracker.jsp"/>

+ 0 - 108
src/webapps/task/tasktracker.jsp

@@ -1,108 +0,0 @@
-<%
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file 
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-%>
-<%@ page
-  contentType="text/html; charset=UTF-8"
-  import="javax.servlet.*"
-  import="javax.servlet.http.*"
-  import="java.io.*"
-  import="java.util.*"
-  import="java.text.DecimalFormat"
-  import="org.apache.hadoop.mapred.*"
-  import="org.apache.hadoop.util.*"
-%>
-<%
-  TaskTracker tracker = (TaskTracker) application.getAttribute("task.tracker");
-  String trackerName = tracker.getName();
-%>
-
-<html>
-
-<title><%= trackerName %> Task Tracker Status</title>
-
-<body>
-<h1><%= trackerName %> Task Tracker Status</h1>
-<img src="/static/hadoop-logo.jpg"/><br>
-<b>Version:</b> <%= VersionInfo.getVersion()%>,
-                <%= VersionInfo.getRevision()%><br>
-<b>Compiled:</b> <%= VersionInfo.getDate()%> by 
-                 <%= VersionInfo.getUser()%> from
-                 <%= VersionInfo.getBranch()%><br>
-
-<h2>Running tasks</h2>
-<center>
-<table border=2 cellpadding="5" cellspacing="2">
-<tr><td align="center">Task Attempts</td><td>Status</td>
-    <td>Progress</td><td>Errors</td></tr>
-
-  <%
-     Iterator itr = tracker.getRunningTaskStatuses().iterator();
-     while (itr.hasNext()) {
-       TaskStatus status = (TaskStatus) itr.next();
-       out.print("<tr><td>" + status.getTaskID());
-       out.print("</td><td>" + status.getRunState()); 
-       out.print("</td><td>" + 
-                 StringUtils.formatPercent(status.getProgress(), 2));
-       out.print("</td><td><pre>" + status.getDiagnosticInfo() + "</pre></td>");
-       out.print("</tr>\n");
-     }
-  %>
-</table>
-</center>
-
-<h2>Non-Running Tasks</h2>
-<table border=2 cellpadding="5" cellspacing="2">
-<tr><td align="center">Task Attempts</td><td>Status</td>
-  <%
-    for(TaskStatus status: tracker.getNonRunningTasks()) {
-      out.print("<tr><td>" + status.getTaskID() + "</td>");
-      out.print("<td>" + status.getRunState() + "</td></tr>\n");
-    }
-  %>
-</table>
-
-
-<h2>Tasks from Running Jobs</h2>
-<center>
-<table border=2 cellpadding="5" cellspacing="2">
-<tr><td align="center">Task Attempts</td><td>Status</td>
-    <td>Progress</td><td>Errors</td></tr>
-
-  <%
-     itr = tracker.getTasksFromRunningJobs().iterator();
-     while (itr.hasNext()) {
-       TaskStatus status = (TaskStatus) itr.next();
-       out.print("<tr><td>" + status.getTaskID());
-       out.print("</td><td>" + status.getRunState()); 
-       out.print("</td><td>" + 
-                 StringUtils.formatPercent(status.getProgress(), 2));
-       out.print("</td><td><pre>" + status.getDiagnosticInfo() + "</pre></td>");
-       out.print("</tr>\n");
-     }
-  %>
-</table>
-</center>
-
-
-<h2>Local Logs</h2>
-<a href="/logs/">Log</a> directory
-
-<%
-out.println(ServletUtil.htmlFooter());
-%>