HADOOP-1148. Re-indent all source code to consistently use two spaces per indent level.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@529410 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent
commit
dab0af46bf
100 changed files with 11771 additions and 11729 deletions
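
For illustration, here is a minimal before/after sketch of the re-indentation this change applies, shown in the same diff notation as the hunks below. The Example class is hypothetical and not part of the patch; the removed lines use four spaces per indent level, the added lines use two.

 public class Example {
-    public int sum(int[] values) {
-        int total = 0;
-        for (int v : values) {
-            total += v;
-        }
-        return total;
-    }
+  public int sum(int[] values) {
+    int total = 0;
+    for (int v : values) {
+      total += v;
+    }
+    return total;
+  }
 }
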
  1. + 3 - 0  CHANGES.txt
  2. + 12 - 12  src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java
  3. + 2 - 2  src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java
  4. + 56 - 56  src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java
  5. + 1 - 1  src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java
  6. + 3 - 3  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java
  7. + 56 - 56  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java
  8. + 3 - 3  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java
  9. + 4 - 4  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java
  10. + 2 - 2  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java
  11. + 5 - 5  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
  12. + 2 - 2  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java
  13. + 11 - 11  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java
  14. + 2 - 2  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java
  15. + 12 - 12  src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java
  16. + 2 - 2  src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
  17. + 17 - 17  src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java
  18. + 1 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java
  19. + 101 - 101  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
  20. + 2 - 2  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java
  21. + 21 - 21  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
  22. + 3 - 3  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
  23. + 3 - 3  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
  24. + 3 - 3  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java
  25. + 132 - 132  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
  26. + 1 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamLineRecordReader.java
  27. + 1 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamSequenceRecordReader.java
  28. + 1 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java
  29. + 12 - 12  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
  30. + 105 - 105  src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java
  31. + 7 - 7  src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java
  32. + 34 - 34  src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
  33. + 12 - 12  src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java
  34. + 10 - 10  src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
  35. + 16 - 16  src/examples/org/apache/hadoop/examples/ExampleDriver.java
  36. + 76 - 76  src/examples/org/apache/hadoop/examples/PiEstimator.java
  37. + 7 - 7  src/examples/org/apache/hadoop/examples/RandomWriter.java
  38. + 8 - 8  src/examples/org/apache/hadoop/examples/Sort.java
  39. + 5 - 5  src/examples/org/apache/hadoop/examples/WordCount.java
  40. + 92 - 92  src/java/org/apache/hadoop/dfs/Block.java
  41. + 1 - 1  src/java/org/apache/hadoop/dfs/BlockCommand.java
  42. + 294 - 294  src/java/org/apache/hadoop/dfs/ClientProtocol.java
  43. + 321 - 321  src/java/org/apache/hadoop/dfs/DFSAdmin.java
  44. + 1236 - 1236  src/java/org/apache/hadoop/dfs/DFSClient.java
  45. + 105 - 105  src/java/org/apache/hadoop/dfs/DFSFileInfo.java
  46. + 703 - 664  src/java/org/apache/hadoop/dfs/DataNode.java
  47. + 11 - 11  src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
  48. + 46 - 46  src/java/org/apache/hadoop/dfs/DatanodeInfo.java
  49. + 38 - 38  src/java/org/apache/hadoop/dfs/DatanodeProtocol.java
  50. + 23 - 23  src/java/org/apache/hadoop/dfs/DfsPath.java
  51. + 121 - 121  src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
  52. + 114 - 114  src/java/org/apache/hadoop/dfs/FSConstants.java
  53. + 505 - 505  src/java/org/apache/hadoop/dfs/FSDataset.java
  54. + 638 - 638  src/java/org/apache/hadoop/dfs/FSDirectory.java
  55. + 27 - 27  src/java/org/apache/hadoop/dfs/FSEditLog.java
  56. + 33 - 33  src/java/org/apache/hadoop/dfs/FSImage.java
  57. + 1710 - 1710  src/java/org/apache/hadoop/dfs/FSNamesystem.java
  58. + 1 - 1  src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java
  59. + 2 - 2  src/java/org/apache/hadoop/dfs/IncorrectVersionException.java
  60. + 190 - 190  src/java/org/apache/hadoop/dfs/JspHelper.java
  61. + 49 - 49  src/java/org/apache/hadoop/dfs/LocatedBlock.java
  62. + 697 - 697  src/java/org/apache/hadoop/dfs/NameNode.java
  63. + 1 - 1  src/java/org/apache/hadoop/dfs/PendingReplicationBlocks.java
  64. + 374 - 374  src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
  65. + 2 - 2  src/java/org/apache/hadoop/dfs/StreamFile.java
  66. + 44 - 44  src/java/org/apache/hadoop/dfs/TransferFsImage.java
  67. + 114 - 114  src/java/org/apache/hadoop/filecache/DistributedCache.java
  68. + 9 - 9  src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  69. + 3 - 3  src/java/org/apache/hadoop/fs/DF.java
  70. + 37 - 37  src/java/org/apache/hadoop/fs/FSInputStream.java
  71. + 42 - 42  src/java/org/apache/hadoop/fs/FileUtil.java
  72. + 9 - 9  src/java/org/apache/hadoop/fs/FilterFileSystem.java
  73. + 937 - 937  src/java/org/apache/hadoop/fs/FsShell.java
  74. + 340 - 340  src/java/org/apache/hadoop/fs/InMemoryFileSystem.java
  75. + 57 - 57  src/java/org/apache/hadoop/fs/LocalFileSystem.java
  76. + 2 - 2  src/java/org/apache/hadoop/fs/PositionedReadable.java
  77. + 16 - 16  src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  78. + 2 - 2  src/java/org/apache/hadoop/io/ArrayWritable.java
  79. + 76 - 76  src/java/org/apache/hadoop/io/BooleanWritable.java
  80. + 1 - 1  src/java/org/apache/hadoop/io/BytesWritable.java
  81. + 181 - 181  src/java/org/apache/hadoop/io/SequenceFile.java
  82. + 3 - 3  src/java/org/apache/hadoop/io/Text.java
  83. + 25 - 25  src/java/org/apache/hadoop/io/TwoDArrayWritable.java
  84. + 2 - 2  src/java/org/apache/hadoop/io/UTF8.java
  85. + 65 - 65  src/java/org/apache/hadoop/io/WritableUtils.java
  86. + 1 - 1  src/java/org/apache/hadoop/io/compress/CompressionCodec.java
  87. + 1 - 1  src/java/org/apache/hadoop/io/compress/DecompressorStream.java
  88. + 4 - 4  src/java/org/apache/hadoop/io/compress/DefaultCodec.java
  89. + 9 - 9  src/java/org/apache/hadoop/io/compress/GzipCodec.java
  90. + 1 - 1  src/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java
  91. + 1 - 1  src/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java
  92. + 3 - 3  src/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java
  93. + 5 - 5  src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
  94. + 10 - 10  src/java/org/apache/hadoop/io/retry/RetryProxy.java
  95. + 98 - 98  src/java/org/apache/hadoop/ipc/SocketChannelOutputStream.java
  96. + 7 - 7  src/java/org/apache/hadoop/mapred/Counters.java
  97. + 3 - 3  src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java
  98. + 655 - 655  src/java/org/apache/hadoop/mapred/JobClient.java
  99. + 835 - 835  src/java/org/apache/hadoop/mapred/JobInProgress.java
  100. + 85 - 85  src/java/org/apache/hadoop/mapred/JobProfile.java

+ 3 - 0
CHANGES.txt

@@ -210,6 +210,9 @@ Trunk (unreleased changes)
 63. HADOOP-1258.  Fix TestCheckpoint test case to wait for 
     MiniDFSCluster to be active.  (Nigel Daley via tomwhite)
 
+64. HADOOP-1148.  Re-indent all Java source code to consistently use
+    two spaces per indent level.  (cutting)
+
 
 Release 0.12.3 - 2007-04-06
 

+ 12 - 12
src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordHistogramCountDescriptor.java

@@ -39,21 +39,21 @@ public class WordHistogramCountDescriptor extends ValueAggregatorBaseDescriptor
    * 
    * @return a list of the generated pairs.
    */
-    public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
-        String words[] = val.toString().split(" |\t");
-        ArrayList<Entry> retv = new ArrayList<Entry>();
-        for (int i = 0; i < words.length; i++) {
-            Text valCount = new Text(words[i] + "\t" + "1");
-            Entry en = generateEntry(VALUE_HISTOGRAM, "WORD_HISTOGRAM",
-                    valCount);
-            retv.add(en);
-        }
-        return retv;
+  public ArrayList<Entry> generateKeyValPairs(Object key, Object val) {
+    String words[] = val.toString().split(" |\t");
+    ArrayList<Entry> retv = new ArrayList<Entry>();
+    for (int i = 0; i < words.length; i++) {
+      Text valCount = new Text(words[i] + "\t" + "1");
+      Entry en = generateEntry(VALUE_HISTOGRAM, "WORD_HISTOGRAM",
+                               valCount);
+      retv.add(en);
     }
+    return retv;
+  }
 
-    public void configure(JobConf job) {
+  public void configure(JobConf job) {
 
-    }
+  }
 
 
 }

+ 2 - 2
src/contrib/abacus/src/java/org/apache/hadoop/abacus/JobBase.java

@@ -137,13 +137,13 @@ public abstract class JobBase implements Mapper, Reducer {
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     iter = this.doubleCounters.entrySet().iterator();
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     return sb.toString();
   }

+ 56 - 56
src/contrib/abacus/src/java/org/apache/hadoop/abacus/LongValueSum.java

@@ -27,69 +27,69 @@ import java.util.ArrayList;
  */
 public class LongValueSum implements ValueAggregator {
 
-    long sum = 0;
+  long sum = 0;
     
-    /**
-     *  the default constructor
-     *
-     */
-    public LongValueSum() {
-        reset();
-    }
+  /**
+   *  the default constructor
+   *
+   */
+  public LongValueSum() {
+    reset();
+  }
 
-    /**
-     * add a value to the aggregator
-     * 
-     * @param val
-     *          an object whose string representation represents a long value.
-     * 
-     */
-    public void addNextValue(Object val) {
-        this.sum += Long.parseLong(val.toString());
-    }
+  /**
+   * add a value to the aggregator
+   * 
+   * @param val
+   *          an object whose string representation represents a long value.
+   * 
+   */
+  public void addNextValue(Object val) {
+    this.sum += Long.parseLong(val.toString());
+  }
     
-    /**
-     * add a value to the aggregator
-     * 
-     * @param val
-     *          a long value.
-     * 
-     */
-    public void addNextValue(long val) {
-        this.sum += val;
-    }
+  /**
+   * add a value to the aggregator
+   * 
+   * @param val
+   *          a long value.
+   * 
+   */
+  public void addNextValue(long val) {
+    this.sum += val;
+  }
     
-    /**
-     * @return the aggregated value
-     */
-    public long getSum() {
-        return this.sum;
-    }
+  /**
+   * @return the aggregated value
+   */
+  public long getSum() {
+    return this.sum;
+  }
     
-    /**
-     * @return the string representation of the aggregated value
-     */
-    public String getReport() {
-        return ""+sum;
-    }
+  /**
+   * @return the string representation of the aggregated value
+   */
+  public String getReport() {
+    return ""+sum;
+  }
 
-    /**
-     * reset the aggregator
-     */
-    public void reset() {
-        sum = 0;
-    }
+  /**
+   * reset the aggregator
+   */
+  public void reset() {
+    sum = 0;
+  }
 
-    /**
-     * @return return an array of one element. The element is a string
-     *         representation of the aggregated value. The return value is
-     *         expected to be used by the a combiner.
-     */
-    public ArrayList getCombinerOutput() {
-        ArrayList retv = new ArrayList(1);
-        retv.add(getReport());
-        return retv;
-    }
+  /**
+   * @return return an array of one element. The element is a string
+   *         representation of the aggregated value. The return value is
+   *         expected to be used by the a combiner.
+   */
+  public ArrayList getCombinerOutput() {
+    ArrayList retv = new ArrayList(1);
+    retv.add(getReport());
+    return retv;
+  }
 }
 
 

+ 1 - 1
src/contrib/abacus/src/java/org/apache/hadoop/abacus/UserDefinedValueAggregatorDescriptor.java

@@ -100,7 +100,7 @@ public class UserDefinedValueAggregatorDescriptor implements
    */
   public String toString() {
     return "UserDefinedValueAggregatorDescriptor with class name:" + "\t"
-        + this.className;
+      + this.className;
   }
 
   /**

+ 3 - 3
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorCombiner.java

@@ -47,12 +47,12 @@ public class ValueAggregatorCombiner extends ValueAggregatorJobBase {
    * @param output to collect combined values
    */
   public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+                     OutputCollector output, Reporter reporter) throws IOException {
     String keyStr = key.toString();
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
     ValueAggregator aggregator = ValueAggregatorBaseDescriptor
-        .generateValueAggregator(type);
+      .generateValueAggregator(type);
     if (aggregator == null) {
       LOG.info(key.toString());
     }
@@ -84,7 +84,7 @@ public class ValueAggregatorCombiner extends ValueAggregatorJobBase {
    *
    */
   public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
-      Reporter arg3) throws IOException {
+                  Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }
 }

+ 56 - 56
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java

@@ -31,70 +31,70 @@ import org.apache.hadoop.mapred.JobConf;
  */
 public abstract class ValueAggregatorJobBase extends JobBase {
  
-    protected ArrayList aggregatorDescriptorList = null;
+  protected ArrayList aggregatorDescriptorList = null;
         
-    public void configure(JobConf job) {
-        super.configure(job);
+  public void configure(JobConf job) {
+    super.configure(job);
         
-        setLongValue("totalCount", 0);
-        setLongValue("errorCount", 0);
-        setLongValue("collectedCount", 0);
-        setLongValue("groupCount", 0);
+    setLongValue("totalCount", 0);
+    setLongValue("errorCount", 0);
+    setLongValue("collectedCount", 0);
+    setLongValue("groupCount", 0);
         
-        this.initializeMySpec(job);
-        this.logSpec();
-    }
+    this.initializeMySpec(job);
+    this.logSpec();
+  }
 
-    private static ValueAggregatorDescriptor getValueAggregatorDescriptor(
-            String spec, JobConf job) {
-        if (spec == null)
-            return null;
-        String[] segments = spec.split(",", -1);
-        String type = segments[0];
-        if (type.compareToIgnoreCase("UserDefined") == 0) {
-            String className = segments[1];
-            return new UserDefinedValueAggregatorDescriptor(className, job);
-        } 
-        return null;
-    }
+  private static ValueAggregatorDescriptor getValueAggregatorDescriptor(
+                                                                        String spec, JobConf job) {
+    if (spec == null)
+      return null;
+    String[] segments = spec.split(",", -1);
+    String type = segments[0];
+    if (type.compareToIgnoreCase("UserDefined") == 0) {
+      String className = segments[1];
+      return new UserDefinedValueAggregatorDescriptor(className, job);
+    } 
+    return null;
+  }
 
-    private static ArrayList getAggregatorDescriptors(JobConf job) {
-        String advn = "aggregator.descriptor";
-        int num = job.getInt(advn + ".num", 0);
-        ArrayList retv = new ArrayList(num);
-        for (int i = 0; i < num; i++) {
-            String spec = job.get(advn + "." + i);
-            ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, job);
-            if (ad != null) {
-                retv.add(ad);
-            }
-        }
-        return retv;
+  private static ArrayList getAggregatorDescriptors(JobConf job) {
+    String advn = "aggregator.descriptor";
+    int num = job.getInt(advn + ".num", 0);
+    ArrayList retv = new ArrayList(num);
+    for (int i = 0; i < num; i++) {
+      String spec = job.get(advn + "." + i);
+      ValueAggregatorDescriptor ad = getValueAggregatorDescriptor(spec, job);
+      if (ad != null) {
+        retv.add(ad);
+      }
     }
+    return retv;
+  }
     
-    private void initializeMySpec(JobConf job) {
-        this.aggregatorDescriptorList = getAggregatorDescriptors(job);
-        if (this.aggregatorDescriptorList.size() == 0) {
-            this.aggregatorDescriptorList.add(new UserDefinedValueAggregatorDescriptor(
-                    ValueAggregatorBaseDescriptor.class.getCanonicalName(), job));
-        }
+  private void initializeMySpec(JobConf job) {
+    this.aggregatorDescriptorList = getAggregatorDescriptors(job);
+    if (this.aggregatorDescriptorList.size() == 0) {
+      this.aggregatorDescriptorList.add(new UserDefinedValueAggregatorDescriptor(
+                                                                                 ValueAggregatorBaseDescriptor.class.getCanonicalName(), job));
     }
+  }
     
-    protected void logSpec() {
-        StringBuffer sb = new StringBuffer();
-        sb.append("\n");
-        if (aggregatorDescriptorList == null) {
-            sb.append(" aggregatorDescriptorList: null");
-        } else {
-            sb.append(" aggregatorDescriptorList: ");
-            for (int i = 0; i < aggregatorDescriptorList.size(); i++) {
-                sb.append(" ").append(aggregatorDescriptorList.get(i).toString());
-            }
-        }      
-        LOG.info(sb.toString());
-    }
+  protected void logSpec() {
+    StringBuffer sb = new StringBuffer();
+    sb.append("\n");
+    if (aggregatorDescriptorList == null) {
+      sb.append(" aggregatorDescriptorList: null");
+    } else {
+      sb.append(" aggregatorDescriptorList: ");
+      for (int i = 0; i < aggregatorDescriptorList.size(); i++) {
+        sb.append(" ").append(aggregatorDescriptorList.get(i).toString());
+      }
+    }      
+    LOG.info(sb.toString());
+  }
 
-    public void close() throws IOException {
-        report();
-    }
+  public void close() throws IOException {
+    report();
+  }
 }

+ 3 - 3
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorMapper.java

@@ -37,7 +37,7 @@ public class ValueAggregatorMapper extends ValueAggregatorJobBase {
    *  list to generate aggregation id/value pairs and emit them.
    */
   public void map(WritableComparable key, Writable value,
-      OutputCollector output, Reporter reporter) throws IOException {
+                  OutputCollector output, Reporter reporter) throws IOException {
 
     addLongValue("groupCount", 1);
     Iterator iter = this.aggregatorDescriptorList.iterator();
@@ -47,7 +47,7 @@ public class ValueAggregatorMapper extends ValueAggregatorJobBase {
       while (ens.hasNext()) {
         Entry en = ens.next();
         output.collect((WritableComparable) en.getKey(), (Writable) en
-            .getValue());
+                       .getValue());
         addLongValue("collectedCount", 1);
       }
     }
@@ -61,7 +61,7 @@ public class ValueAggregatorMapper extends ValueAggregatorJobBase {
    * Do nothing. Should not be called.
    */
   public void reduce(WritableComparable arg0, Iterator arg1,
-      OutputCollector arg2, Reporter arg3) throws IOException {
+                     OutputCollector arg2, Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }
 }

+ 4 - 4
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorReducer.java

@@ -45,16 +45,16 @@ public class ValueAggregatorReducer extends ValueAggregatorJobBase {
    * @value the values to be aggregated
    */
   public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+                     OutputCollector output, Reporter reporter) throws IOException {
     addLongValue("groupCount", 1);
     String keyStr = key.toString();
     int pos = keyStr.indexOf(ValueAggregatorDescriptor.TYPE_SEPARATOR);
     String type = keyStr.substring(0, pos);
     keyStr = keyStr.substring(pos
-        + ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
+                              + ValueAggregatorDescriptor.TYPE_SEPARATOR.length());
 
     ValueAggregator aggregator = ValueAggregatorBaseDescriptor
-        .generateValueAggregator(type);
+      .generateValueAggregator(type);
     while (values.hasNext()) {
       addLongValue("totalCount", 1);
       aggregator.addNextValue(values.next());
@@ -73,7 +73,7 @@ public class ValueAggregatorReducer extends ValueAggregatorJobBase {
    * Do nothing. Should not be called
    */
   public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
-      Reporter arg3) throws IOException {
+                  Reporter arg3) throws IOException {
     throw new IOException ("should not be called\n");
   }
 }

+ 2 - 2
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueHistogram.java

@@ -96,7 +96,7 @@ public class ValueHistogram implements ValueAggregator {
       }
       acc += nextVal * (j - i);
       //sbVal.append("\t").append(nextVal).append("\t").append(j - i)
-          //.append("\n");
+      //.append("\n");
       i = j;
     }
     double average = 0.0;
@@ -137,7 +137,7 @@ public class ValueHistogram implements ValueAggregator {
       Object val = en.getKey();
       Long count = (Long) en.getValue();
       sb.append("\t").append(val.toString()).append("\t").append(
-          count.longValue()).append("\n");
+                                                                 count.longValue()).append("\n");
     }
     return sb.toString();
   }

+ 5 - 5
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java

@@ -97,7 +97,7 @@ public class DataJoinJob {
     job.setOutputPath(new Path(outputDir));
     job.setOutputFormat(outputFormat);
     SequenceFile.setCompressionType(job,
-        SequenceFile.CompressionType.BLOCK);
+                                    SequenceFile.CompressionType.BLOCK);
     job.setMapOutputKeyClass(Text.class);
     job.setMapOutputValueClass(mapoutputValueClass);
     job.setOutputKeyClass(Text.class);
@@ -107,7 +107,7 @@ public class DataJoinJob {
     job.setNumMapTasks(1);
     job.setNumReduceTasks(numOfReducers);
     job.setLong("ultjoin.maxNumOfValuesPerGroup",
-        maxNumOfValuesPerGroup);
+                maxNumOfValuesPerGroup);
     job.set("mapred.child.java.opts", "-Xmx1024m");
     job.setKeepFailedTaskFiles(true);
     return job;
@@ -153,9 +153,9 @@ public class DataJoinJob {
     boolean success;
     if (args.length < 7 || args.length > 9) {
       System.out.println("usage: DataJoinJob " + "inputdirs outputdir "
-          + "numofParts " + "mapper_class " + "reducer_class "
-          + "map_output_value_class "
-          + "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]");
+                         + "numofParts " + "mapper_class " + "reducer_class "
+                         + "map_output_value_class "
+                         + "output_value_class [maxNumOfValuesPerGroup [descriptionOfJob]]]");
       System.exit(-1);
     }
 

+ 2 - 2
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinMapperBase.java

@@ -90,7 +90,7 @@ public abstract class DataJoinMapperBase extends JobBase {
   protected abstract Text generateGroupKey(TaggedMapOutput aRecord);
 
   public void map(WritableComparable key, Writable value,
-      OutputCollector output, Reporter reporter) throws IOException {
+                  OutputCollector output, Reporter reporter) throws IOException {
     if (this.reporter == null) {
       this.reporter = reporter;
     }
@@ -116,7 +116,7 @@ public abstract class DataJoinMapperBase extends JobBase {
   }
 
   public void reduce(WritableComparable arg0, Iterator arg1,
-      OutputCollector arg2, Reporter arg3) throws IOException {
+                     OutputCollector arg2, Reporter arg3) throws IOException {
     // TODO Auto-generated method stub
 
   }

+ 11 - 11
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinReducerBase.java

@@ -69,7 +69,7 @@ public abstract class DataJoinReducerBase extends JobBase {
     super.configure(job);
     this.job = job;
     this.maxNumOfValuesPerGroup = job.getLong("ultjoin.maxNumOfValuesPerGroup",
-        100);
+                                              100);
   }
 
   /**
@@ -92,7 +92,7 @@ public abstract class DataJoinReducerBase extends JobBase {
    * @return
    */
   private SortedMap<Object, ResetableIterator> regroup(Writable key,
-      Iterator arg1, Reporter reporter) throws IOException {
+                                                       Iterator arg1, Reporter reporter) throws IOException {
     this.numOfValues = 0;
     SortedMap<Object, ResetableIterator> retv = new TreeMap<Object, ResetableIterator>();
     TaggedMapOutput aRecord = null;
@@ -101,7 +101,7 @@ public abstract class DataJoinReducerBase extends JobBase {
       this.numOfValues += 1;
       if (this.numOfValues % 100 == 0) {
         reporter.setStatus("key: " + key.toString() + " numOfValues: "
-            + this.numOfValues);
+                           + this.numOfValues);
       }
       if (this.numOfValues > this.maxNumOfValuesPerGroup) {
         continue;
@@ -117,13 +117,13 @@ public abstract class DataJoinReducerBase extends JobBase {
     if (this.numOfValues > this.largestNumOfValues) {
       this.largestNumOfValues = numOfValues;
       LOG.info("key: " + key.toString() + " this.largestNumOfValues: "
-          + this.largestNumOfValues);
+               + this.largestNumOfValues);
     }
     return retv;
   }
 
   public void reduce(WritableComparable key, Iterator values,
-      OutputCollector output, Reporter reporter) throws IOException {
+                     OutputCollector output, Reporter reporter) throws IOException {
     if (this.reporter == null) {
       this.reporter = reporter;
     }
@@ -152,7 +152,7 @@ public abstract class DataJoinReducerBase extends JobBase {
    * @throws IOException
    */
   protected void collect(WritableComparable key, TaggedMapOutput aRecord,
-      OutputCollector output, Reporter reporter) throws IOException {
+                         OutputCollector output, Reporter reporter) throws IOException {
     this.collected += 1;
     addLongValue("collectedCount", 1);
     if (aRecord != null && this.collected % 1 == 0) {
@@ -174,8 +174,8 @@ public abstract class DataJoinReducerBase extends JobBase {
    * @throws IOException
    */
   private void joinAndCollect(Object[] tags, ResetableIterator[] values,
-      WritableComparable key, OutputCollector output, Reporter reporter)
-      throws IOException {
+                              WritableComparable key, OutputCollector output, Reporter reporter)
+    throws IOException {
     if (values.length < 1) {
       return;
     }
@@ -199,8 +199,8 @@ public abstract class DataJoinReducerBase extends JobBase {
    * @throws IOException
    */
   private void joinAndCollect(Object[] tags, ResetableIterator[] values,
-      int pos, Object[] partialList, WritableComparable key,
-      OutputCollector output, Reporter reporter) throws IOException {
+                              int pos, Object[] partialList, WritableComparable key,
+                              OutputCollector output, Reporter reporter) throws IOException {
 
     if (values.length == pos) {
       // get a value from each source. Combine them
@@ -232,7 +232,7 @@ public abstract class DataJoinReducerBase extends JobBase {
   protected abstract TaggedMapOutput combine(Object[] tags, Object[] values);
 
   public void map(WritableComparable arg0, Writable arg1, OutputCollector arg2,
-      Reporter arg3) throws IOException {
+                  Reporter arg3) throws IOException {
     // TODO Auto-generated method stub
 
   }

+ 2 - 2
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/JobBase.java

@@ -149,13 +149,13 @@ public abstract class JobBase implements Mapper, Reducer {
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     iter = this.doubleCounters.entrySet().iterator();
     while (iter.hasNext()) {
       Entry e = (Entry) iter.next();
       sb.append(e.getKey().toString()).append("\t").append(e.getValue())
-          .append("\n");
+        .append("\n");
     }
     return sb.toString();
   }

+ 12 - 12
src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java

@@ -77,7 +77,7 @@ class CompoundDirSpec {
     if (false == isInputSpec_) {
       if (msup > 1) {
         throw new IllegalStateException("A -output spec cannot use merged streams ('" + MERGE_SEP
-            + "' delimiter)");
+                                        + "' delimiter)");
       }
     }
     for (int m = 0; m < msup; m++) {
@@ -121,7 +121,7 @@ class CompoundDirSpec {
 
   void throwBadNumPrimaryInputSpecs() throws IllegalStateException {
     String msg = "A compound -input spec needs exactly one primary path prefixed with "
-        + PRIMARY_PREFIX;
+      + PRIMARY_PREFIX;
     msg += ":\n";
     msg += toTableString();
     throw new IllegalStateException(msg);
@@ -151,13 +151,13 @@ class CompoundDirSpec {
   }
   
   /*
-   Example input spec in table form:
-   <1 +[/input/part-00] 
-   <2  [/input/part-01] 
-   <3  [/input/part-02] 
-   Example output spec in table form:
-   +[/my.output] 
-   */
+    Example input spec in table form:
+    <1 +[/input/part-00] 
+    <2  [/input/part-01] 
+    <3  [/input/part-02] 
+    Example output spec in table form:
+    +[/my.output] 
+  */
   public String toTableString() {
     StringBuffer buf = new StringBuffer();
     int maxWid = 0;
@@ -204,9 +204,9 @@ class CompoundDirSpec {
   }
 
   /** 
-   @see #primaryRow 
-   @see #primaryCol
-   */
+      @see #primaryRow 
+      @see #primaryCol
+  */
   public String[][] getPaths() {
     return paths_;
   }

+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java

@@ -41,8 +41,8 @@ public class Environment extends Properties {
     if (OS.indexOf("Windows") > -1) {
       command = "cmd /C set";
     } else if (lowerOs.indexOf("ix") > -1 || lowerOs.indexOf("linux") > -1
-        || lowerOs.indexOf("freebsd") > -1 || lowerOs.indexOf("sunos") > -1
-        || lowerOs.indexOf("solaris") > -1 || lowerOs.indexOf("hp-ux") > -1) {
+               || lowerOs.indexOf("freebsd") > -1 || lowerOs.indexOf("sunos") > -1
+               || lowerOs.indexOf("solaris") > -1 || lowerOs.indexOf("hp-ux") > -1) {
       command = "env";
     } else if (lowerOs.startsWith("mac os x")) {
       command = "env";

+ 17 - 17
src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java

@@ -80,10 +80,10 @@ public class MergerInputFormat extends InputFormatBase {
   }
 
   /** Delegate to the primary InputFormat. 
-   Force full-file splits since there's no index to sync secondaries.
-   (and if there was, this index may need to be created for the first time
-   full file at a time...    )
-   */
+      Force full-file splits since there's no index to sync secondaries.
+      (and if there was, this index may need to be created for the first time
+      full file at a time...    )
+  */
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     return ((StreamInputFormat) primary_).getSplits(job, numSplits);
   }
@@ -119,20 +119,20 @@ public class MergerInputFormat extends InputFormatBase {
   }
 
   /*
-   private FileSplit relatedSplit(FileSplit primarySplit, int i, CompoundDirSpec spec) throws IOException
-   {
-   if(i == 0) {
-   return primarySplit;
-   }
+    private FileSplit relatedSplit(FileSplit primarySplit, int i, CompoundDirSpec spec) throws IOException
+    {
+    if(i == 0) {
+    return primarySplit;
+    }
 
-   // TODO based on custom JobConf (or indirectly: InputFormat-s?)
-   String path = primarySplit.getFile().getAbsolutePath();
-   Path rpath = new Path(path + "." + i);
+    // TODO based on custom JobConf (or indirectly: InputFormat-s?)
+    String path = primarySplit.getFile().getAbsolutePath();
+    Path rpath = new Path(path + "." + i);
 
-   long rlength = fs_.getLength(rpath);
-   FileSplit related = new FileSplit(rpath, 0, rlength);
-   return related;    
-   }*/
+    long rlength = fs_.getLength(rpath);
+    FileSplit related = new FileSplit(rpath, 0, rlength);
+    return related;    
+    }*/
 
   class MergedRecordReader implements RecordReader {
 
@@ -237,7 +237,7 @@ public class MergerInputFormat extends InputFormatBase {
             src = new Text(">" + tag + "\t" + src.toString()); // breaks anything?
           } else {
             throw new UnsupportedOperationException("Cannot use with tags with key class "
-                + src.getClass());
+                                                    + src.getClass());
           }
         }
         src.write(outBuf);

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/MuxOutputFormat.java

@@ -88,7 +88,7 @@ public class MuxOutputFormat implements OutputFormat {
         int c = Integer.parseInt(s1);
         if (c < 1 || c > max) {
           String msg = "Output channel '" + s + "': must be an integer between 1 and " + max
-              + " followed by '" + CHANOUT + "' and TAB";
+            + " followed by '" + CHANOUT + "' and TAB";
           throw new IndexOutOfBoundsException(msg);
         }
         return c;

+ 101 - 101
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java

@@ -28,121 +28,121 @@ import java.util.*;
  */
 public class PathFinder
 {
-    String pathenv;        // a string of pathnames
-    String pathSep;        // the path seperator
-    String fileSep;        // the file seperator in a directory
+  String pathenv;        // a string of pathnames
+  String pathSep;        // the path seperator
+  String fileSep;        // the file seperator in a directory
 
-    /**
-     * Construct a PathFinder object using the path from
-     * java.class.path
-     */
-    public PathFinder()
-    {
-        pathenv = System.getProperty("java.class.path");
-        pathSep = System.getProperty("path.separator");
-        fileSep = System.getProperty("file.separator");
-    }
+  /**
+   * Construct a PathFinder object using the path from
+   * java.class.path
+   */
+  public PathFinder()
+  {
+    pathenv = System.getProperty("java.class.path");
+    pathSep = System.getProperty("path.separator");
+    fileSep = System.getProperty("file.separator");
+  }
 
-    /**
-     * Construct a PathFinder object using the path from
-     * the specified system environment variable.
-     */
-    public PathFinder(String envpath)
-    {
-        pathenv = System.getenv(envpath);
-        pathSep = System.getProperty("path.separator");
-        fileSep = System.getProperty("file.separator");
-    }
+  /**
+   * Construct a PathFinder object using the path from
+   * the specified system environment variable.
+   */
+  public PathFinder(String envpath)
+  {
+    pathenv = System.getenv(envpath);
+    pathSep = System.getProperty("path.separator");
+    fileSep = System.getProperty("file.separator");
+  }
 
-    /**
-     * Appends the specified component to the path list
-     */
-    public void prependPathComponent(String str)
-    {
-        pathenv = str + pathSep + pathenv;
-    }
+  /**
+   * Appends the specified component to the path list
+   */
+  public void prependPathComponent(String str)
+  {
+    pathenv = str + pathSep + pathenv;
+  }
 
-    /**
-     * Returns the full path name of this file if it is listed in the
-     * path
-     */
-    public File getAbsolutePath(String filename)
-    {
-        if (pathenv == null || pathSep == null  || fileSep == null)
-        {
-            return null;
-        }
-        int     val = -1;
-        String    classvalue = pathenv + pathSep;
+  /**
+   * Returns the full path name of this file if it is listed in the
+   * path
+   */
+  public File getAbsolutePath(String filename)
+  {
+    if (pathenv == null || pathSep == null  || fileSep == null)
+      {
+        return null;
+      }
+    int     val = -1;
+    String    classvalue = pathenv + pathSep;
 
-        while (((val = classvalue.indexOf(pathSep)) >= 0) &&
-               classvalue.length() > 0) {
-            //
-            // Extract each entry from the pathenv
-            //
-            String entry = classvalue.substring(0, val).trim();
-            File f = new File(entry);
+    while (((val = classvalue.indexOf(pathSep)) >= 0) &&
+           classvalue.length() > 0) {
+      //
+      // Extract each entry from the pathenv
+      //
+      String entry = classvalue.substring(0, val).trim();
+      File f = new File(entry);
 
-            try {
-                if (f.isDirectory()) {
-                    //
-                    // this entry in the pathenv is a directory.
-                    // see if the required file is in this directory
-                    //
-                    f = new File(entry + fileSep + filename);
-                }
-                //
-                // see if the filename matches and  we can read it
-                //
-                if (f.isFile() && f.canRead()) {
-                    return f;
-                }
-            } catch (Exception exp){ }
-            classvalue = classvalue.substring(val+1).trim();
+      try {
+        if (f.isDirectory()) {
+          //
+          // this entry in the pathenv is a directory.
+          // see if the required file is in this directory
+          //
+          f = new File(entry + fileSep + filename);
         }
-        return null;
+        //
+        // see if the filename matches and  we can read it
+        //
+        if (f.isFile() && f.canRead()) {
+          return f;
+        }
+      } catch (Exception exp){ }
+      classvalue = classvalue.substring(val+1).trim();
     }
+    return null;
+  }
 
-    /**
-     * prints all environment variables for this process
-     */
-    private static void printEnvVariables() {
-        System.out.println("Environment Variables: ");
-        Map<String,String> map = System.getenv();
-        Set<String> keys = map.keySet();
-        Iterator iter = keys.iterator();
-        while(iter.hasNext()) {
-            String thiskey = (String)(iter.next()); 
-            String value = map.get(thiskey);
-            System.out.println(thiskey + " = " + value);
-        }
+  /**
+   * prints all environment variables for this process
+   */
+  private static void printEnvVariables() {
+    System.out.println("Environment Variables: ");
+    Map<String,String> map = System.getenv();
+    Set<String> keys = map.keySet();
+    Iterator iter = keys.iterator();
+    while(iter.hasNext()) {
+      String thiskey = (String)(iter.next()); 
+      String value = map.get(thiskey);
+      System.out.println(thiskey + " = " + value);
     }
+  }
 
-    /**
-     * prints all system properties for this process
-     */
-    private static void printSystemProperties() {
-        System.out.println("System properties: ");
-        java.util.Properties p = System.getProperties();
-        java.util.Enumeration keys = p.keys();
-        while(keys.hasMoreElements()) {
-            String thiskey = (String)keys.nextElement();
-            String value = p.getProperty(thiskey);
-            System.out.println(thiskey + " = " + value);
-        }
+  /**
+   * prints all system properties for this process
+   */
+  private static void printSystemProperties() {
+    System.out.println("System properties: ");
+    java.util.Properties p = System.getProperties();
+    java.util.Enumeration keys = p.keys();
+    while(keys.hasMoreElements()) {
+      String thiskey = (String)keys.nextElement();
+      String value = p.getProperty(thiskey);
+      System.out.println(thiskey + " = " + value);
     }
+  }
 
-    public static void main(String args[]) throws IOException {
+  public static void main(String args[]) throws IOException {
 
-        if (args.length < 1) {
-            System.out.println("Usage: java PathFinder <filename>");
-            System.exit(1);
-        }
+    if (args.length < 1) {
+      System.out.println("Usage: java PathFinder <filename>");
+      System.exit(1);
+    }
 
-        PathFinder finder = new PathFinder("PATH");
-        File file = finder.getAbsolutePath(args[0]);
-        if (file != null) {
-            System.out.println("Full path name = " + file.getCanonicalPath());
-        }
+    PathFinder finder = new PathFinder("PATH");
+    File file = finder.getAbsolutePath(args[0]);
+    if (file != null) {
+      System.out.println("Full path name = " + file.getCanonicalPath());
     }
+  }
 }

+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeCombiner.java

@@ -55,8 +55,8 @@ public class PipeCombiner extends PipeReducer {
     try {
       return URLDecoder.decode(str, "UTF-8");
     } catch (UnsupportedEncodingException e) {
-        System.err.println("stream.combine.streamprocessor in jobconf not found");
-        return null;
+      System.err.println("stream.combine.streamprocessor in jobconf not found");
+      return null;
     }
   }
 

+ 21 - 21
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java

@@ -65,7 +65,7 @@ public abstract class PipeMapRed {
   abstract String getKeyColPropName();
 
   /** Write output as side-effect files rather than as map outputs.
-   This is useful to do "Map" tasks rather than "MapReduce" tasks. */
+      This is useful to do "Map" tasks rather than "MapReduce" tasks. */
   boolean getUseSideEffect() {
     return false;
   }
@@ -168,11 +168,11 @@ public abstract class PipeMapRed {
       }
       final Socket sock = new Socket(uri.getHost(), uri.getPort());
       OutputStream out = new FilterOutputStream(sock.getOutputStream()) {
-        public void close() throws IOException {
-          sock.close();
-          super.close();
-        }
-      };
+          public void close() throws IOException {
+            sock.close();
+            super.close();
+          }
+        };
       return out;
     } else {
       // a FSDataOutputStreamm, localFS or HDFS.
@@ -184,7 +184,7 @@ public abstract class PipeMapRed {
   String getSideEffectFileName() {
     FileSplit split = StreamUtil.getCurrentSplit(job_);
     return new String(split.getPath().getName() + "-" + split.getStart() + 
-            "-" + split.getLength());
+                      "-" + split.getLength());
   }
 
   public void configure(JobConf job) {
@@ -276,13 +276,13 @@ public abstract class PipeMapRed {
       // tasktracker's local working directory
       //
       if (!new File(argvSplit[0]).isAbsolute()) {
-          PathFinder finder = new PathFinder("PATH");
-          finder.prependPathComponent(jobCacheDir.toString());
-          File f = finder.getAbsolutePath(argvSplit[0]);
-          if (f != null) {
-              argvSplit[0] = f.getAbsolutePath();
-          }
-          f = null;
+        PathFinder finder = new PathFinder("PATH");
+        finder.prependPathComponent(jobCacheDir.toString());
+        File f = finder.getAbsolutePath(argvSplit[0]);
+        if (f != null) {
+          argvSplit[0] = f.getAbsolutePath();
+        }
+        f = null;
       }
       logprintln("PipeMapRed exec " + Arrays.asList(argvSplit));
       logprintln("sideEffectURI_=" + finalOutputURI);
@@ -293,11 +293,11 @@ public abstract class PipeMapRed {
       sim = Runtime.getRuntime().exec(argvSplit, childEnv.toArray());
 
       /* // This way required jdk1.5
-       Builder processBuilder = new ProcessBuilder(argvSplit);
-       Map<String, String> env = processBuilder.environment();
-       addEnvironment(env, job_.get("stream.addenvironment"));
-       sim = processBuilder.start();
-       */
+         Builder processBuilder = new ProcessBuilder(argvSplit);
+         Map<String, String> env = processBuilder.environment();
+         addEnvironment(env, job_.get("stream.addenvironment"));
+         sim = processBuilder.start();
+      */
 
       clientOut_ = new DataOutputStream(new BufferedOutputStream(sim.getOutputStream()));
       clientIn_ = new DataInputStream(new BufferedInputStream(sim.getInputStream()));
@@ -626,8 +626,8 @@ public abstract class PipeMapRed {
   String numRecInfo() {
     long elapsed = (System.currentTimeMillis() - startTime_) / 1000;
     return "R/W/S=" + numRecRead_ + "/" + numRecWritten_ + "/" + numRecSkipped_ + " in:"
-        + safeDiv(numRecRead_, elapsed) + " [rec/s]" + " out:" + safeDiv(numRecWritten_, elapsed)
-        + " [rec/s]";
+      + safeDiv(numRecRead_, elapsed) + " [rec/s]" + " out:" + safeDiv(numRecWritten_, elapsed)
+      + " [rec/s]";
   }
 
   String safeDiv(long n, long d) {

+ 3 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java

@@ -45,8 +45,8 @@ public class PipeReducer extends PipeMapRed implements Reducer {
     try {
       return URLDecoder.decode(str, "UTF-8");
     } catch (UnsupportedEncodingException e) {
-        System.err.println("stream.reduce.streamprocessor in jobconf not found");
-        return null;
+      System.err.println("stream.reduce.streamprocessor in jobconf not found");
+      return null;
     }
   }
 
@@ -61,7 +61,7 @@ public class PipeReducer extends PipeMapRed implements Reducer {
   }
 
   public void reduce(WritableComparable key, Iterator values, OutputCollector output,
-      Reporter reporter) throws IOException {
+                     Reporter reporter) throws IOException {
 
     // init
     if (doPipe_ && outThread_ == null) {

+ 3 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java

@@ -48,7 +48,7 @@ public abstract class StreamBaseRecordReader implements RecordReader {
   final static String CONF_NS = "stream.recordreader.";
 
   public StreamBaseRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
-      JobConf job, FileSystem fs) throws IOException {
+                                JobConf job, FileSystem fs) throws IOException {
     in_ = in;
     split_ = split;
     start_ = split_.getStart();
@@ -131,9 +131,9 @@ public abstract class StreamBaseRecordReader implements RecordReader {
       recStr = record.toString();
     }
     String unqualSplit = split_.getFile().getName() + ":" + split_.getStart() + "+"
-        + split_.getLength();
+      + split_.getLength();
     String status = "HSTR " + StreamUtil.HOST + " " + numRec_ + ". pos=" + pos + " " + unqualSplit
-        + " Processing record=" + recStr;
+      + " Processing record=" + recStr;
     status += " " + splitName_;
     return status;
   }

+ 3 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamInputFormat.java

@@ -34,7 +34,7 @@ import org.apache.hadoop.mapred.*;
 public class StreamInputFormat extends KeyValueTextInputFormat {
 
   public RecordReader getRecordReader(final InputSplit genericSplit,
-      JobConf job, Reporter reporter) throws IOException {
+                                      JobConf job, Reporter reporter) throws IOException {
     String c = job.get("stream.recordreader.class");
     if (c == null || c.indexOf("LineRecordReader") >= 0) {
       return super.getRecordReader(genericSplit, job, reporter);
@@ -62,7 +62,7 @@ public class StreamInputFormat extends KeyValueTextInputFormat {
     Constructor ctor;
     try {
       ctor = readerClass.getConstructor(new Class[] { FSDataInputStream.class,
-          FileSplit.class, Reporter.class, JobConf.class, FileSystem.class });
+                                                      FileSplit.class, Reporter.class, JobConf.class, FileSystem.class });
     } catch (NoSuchMethodException nsm) {
       throw new RuntimeException(nsm);
     }
@@ -70,7 +70,7 @@ public class StreamInputFormat extends KeyValueTextInputFormat {
     RecordReader reader;
     try {
       reader = (RecordReader) ctor.newInstance(new Object[] { in, split,
-          reporter, job, fs });
+                                                              reporter, job, fs });
     } catch (Exception nsm) {
       throw new RuntimeException(nsm);
     }

+ 132 - 132
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java

@@ -87,9 +87,9 @@ public class StreamJob {
   // need these two at class level to extract values later from 
   // commons-cli command line
   private MultiPropertyOption jobconf = new MultiPropertyOption(
-      "-jobconf", "(n=v) Optional. Add or override a JobConf property.", 'D'); 
+                                                                "-jobconf", "(n=v) Optional. Add or override a JobConf property.", 'D'); 
   private MultiPropertyOption cmdenv = new MultiPropertyOption(
-      "-cmdenv", "(n=v) Pass env.var to streaming commands.", 'E');  
+                                                               "-cmdenv", "(n=v) Pass env.var to streaming commands.", 'E');  
   
   public StreamJob(String[] argv, boolean mayExit) {
     setupOptions();
@@ -199,7 +199,7 @@ public class StreamJob {
   void parseArgv(){
     CommandLine cmdLine = null ; 
     try{
-       cmdLine = parser.parse(argv_);
+      cmdLine = parser.parse(argv_);
     }catch(Exception oe){
       LOG.error(oe.getMessage());
       if (detailedUsage_) {
@@ -288,36 +288,36 @@ public class StreamJob {
   }
   
   private Option createOption(String name, String desc, 
-      String argName, int max, boolean required){
+                              String argName, int max, boolean required){
     Argument argument = argBuilder.
-                      withName(argName).
-                      withMinimum(1).
-                      withMaximum(max).
-                      create();
+      withName(argName).
+      withMinimum(1).
+      withMaximum(max).
+      create();
     return builder.
-              withLongName(name).
-              withArgument(argument).
-              withDescription(desc).
-              withRequired(required).
-              create();
+      withLongName(name).
+      withArgument(argument).
+      withDescription(desc).
+      withRequired(required).
+      create();
   }
   
   private Option createOption(String name, String desc, 
-      String argName, int max, boolean required, Validator validator){
+                              String argName, int max, boolean required, Validator validator){
     
     Argument argument = argBuilder.
-                              withName(argName).
-                              withMinimum(1).
-                              withMaximum(max).
-                              withValidator(validator).
-                              create() ;
+      withName(argName).
+      withMinimum(1).
+      withMaximum(max).
+      withValidator(validator).
+      create() ;
    
     return builder.
-              withLongName(name).
-              withArgument(argument).
-              withDescription(desc).
-              withRequired(required).
-              create();
+      withLongName(name).
+      withArgument(argument).
+      withDescription(desc).
+      withRequired(required).
+      create();
   }  
   
   private Option createBoolOption(String name, String desc){
@@ -327,82 +327,82 @@ public class StreamJob {
   private void setupOptions(){
 
     final Validator fileValidator = new Validator(){
-      public void validate(final List values) throws InvalidArgumentException {
-        // Note : This code doesnt belong here, it should be changed to 
-        // an can exec check in java 6
-        for (String file : (List<String>)values) {
-          File f = new File(file);  
-          if ( ! f.exists() ) {
-            throw new InvalidArgumentException("Argument : " + 
-                f.getAbsolutePath() + " doesn't exist."); 
-          }
-          if ( ! f.isFile() ) {
-            throw new InvalidArgumentException("Argument : " + 
-                f.getAbsolutePath() + " is not a file."); 
-          }
-          if ( ! f.canRead() ) {
-            throw new InvalidArgumentException("Argument : " + 
-                f.getAbsolutePath() + " is not accessible"); 
+        public void validate(final List values) throws InvalidArgumentException {
+          // Note : This code doesnt belong here, it should be changed to 
+          // an can exec check in java 6
+          for (String file : (List<String>)values) {
+            File f = new File(file);  
+            if ( ! f.exists() ) {
+              throw new InvalidArgumentException("Argument : " + 
+                                                 f.getAbsolutePath() + " doesn't exist."); 
+            }
+            if ( ! f.isFile() ) {
+              throw new InvalidArgumentException("Argument : " + 
+                                                 f.getAbsolutePath() + " is not a file."); 
+            }
+            if ( ! f.canRead() ) {
+              throw new InvalidArgumentException("Argument : " + 
+                                                 f.getAbsolutePath() + " is not accessible"); 
+            }
           }
-        }
-      }      
-    }; 
+        }      
+      }; 
 
     // Note: not extending CLI2's FileValidator, that overwrites 
     // the String arg into File and causes ClassCastException 
     // in inheritance tree. 
     final Validator execValidator = new Validator(){
-      public void validate(final List values) throws InvalidArgumentException {
-        // Note : This code doesnt belong here, it should be changed to 
-        // an can exec check in java 6
-        for (String file : (List<String>)values) {
-          try{
-            Runtime.getRuntime().exec("chmod 0777 " + (new File(file)).getAbsolutePath());
-          }catch(IOException ioe){
-            // ignore 
+        public void validate(final List values) throws InvalidArgumentException {
+          // Note : This code doesnt belong here, it should be changed to 
+          // an can exec check in java 6
+          for (String file : (List<String>)values) {
+            try{
+              Runtime.getRuntime().exec("chmod 0777 " + (new File(file)).getAbsolutePath());
+            }catch(IOException ioe){
+              // ignore 
+            }
           }
-        }
-        fileValidator.validate(values);
-    }      
-    }; 
+          fileValidator.validate(values);
+        }      
+      }; 
 
     Option input   = createOption("input", 
-        "DFS input file(s) for the Map step", 
-        "path", 
-        Integer.MAX_VALUE, 
-        true);  
+                                  "DFS input file(s) for the Map step", 
+                                  "path", 
+                                  Integer.MAX_VALUE, 
+                                  true);  
     
     Option output  = createOption("output", 
-        "DFS output directory for the Reduce step", 
-        "path", 1, true); 
+                                  "DFS output directory for the Reduce step", 
+                                  "path", 1, true); 
     Option mapper  = createOption("mapper", 
-        "The streaming command to run", "cmd", 1, false);
+                                  "The streaming command to run", "cmd", 1, false);
     Option combiner = createOption("combiner", 
-        "The streaming command to run", "cmd",1, false);
+                                   "The streaming command to run", "cmd",1, false);
     // reducer could be NONE 
     Option reducer = createOption("reducer", 
-        "The streaming command to run", "cmd", 1, false); 
+                                  "The streaming command to run", "cmd", 1, false); 
     Option file = createOption("file", 
-        "File/dir to be shipped in the Job jar file", 
-        "file", Integer.MAX_VALUE, false, execValidator); 
+                               "File/dir to be shipped in the Job jar file", 
+                               "file", Integer.MAX_VALUE, false, execValidator); 
     Option dfs = createOption("dfs", 
-        "Optional. Override DFS configuration", "<h:p>|local", 1, false); 
+                              "Optional. Override DFS configuration", "<h:p>|local", 1, false); 
     Option jt = createOption("jt", 
-        "Optional. Override JobTracker configuration", "<h:p>|local",1, false);
+                             "Optional. Override JobTracker configuration", "<h:p>|local",1, false);
     Option additionalconfspec = createOption("additionalconfspec", 
-        "Optional.", "spec",1, false );
+                                             "Optional.", "spec",1, false );
     Option inputformat = createOption("inputformat", 
-        "Optional.", "spec",1, false );
+                                      "Optional.", "spec",1, false );
     Option outputformat = createOption("outputformat", 
-        "Optional.", "spec",1, false );
+                                       "Optional.", "spec",1, false );
     Option partitioner = createOption("partitioner", 
-        "Optional.", "spec",1, false );
+                                      "Optional.", "spec",1, false );
     Option inputreader = createOption("inputreader", 
-        "Optional.", "spec",1, false );
+                                      "Optional.", "spec",1, false );
     Option cacheFile = createOption("cacheFile", 
-        "File name URI", "fileNameURI", 1, false);
+                                    "File name URI", "fileNameURI", 1, false);
     Option cacheArchive = createOption("cacheArchive", 
-        "File name URI", "fileNameURI",1, false);
+                                       "File name URI", "fileNameURI",1, false);
     
     // boolean properties
     
@@ -413,29 +413,29 @@ public class StreamJob {
     Option inputtagged = createBoolOption("inputtagged", "inputtagged"); 
     
     allOptions = new GroupBuilder().
-                          withOption(input).
-                          withOption(output).
-                          withOption(mapper).
-                          withOption(combiner).
-                          withOption(reducer).
-                          withOption(file).
-                          withOption(dfs).
-                          withOption(jt).
-                          withOption(additionalconfspec).
-                          withOption(inputformat).
-                          withOption(outputformat).
-                          withOption(partitioner).
-                          withOption(inputreader).
-                          withOption(jobconf).
-                          withOption(cmdenv).
-                          withOption(cacheFile).
-                          withOption(cacheArchive).
-                          withOption(verbose).
-                          withOption(info).
-                          withOption(debug).
-                          withOption(inputtagged).
-                          withOption(help).
-                          create();
+      withOption(input).
+      withOption(output).
+      withOption(mapper).
+      withOption(combiner).
+      withOption(reducer).
+      withOption(file).
+      withOption(dfs).
+      withOption(jt).
+      withOption(additionalconfspec).
+      withOption(inputformat).
+      withOption(outputformat).
+      withOption(partitioner).
+      withOption(inputreader).
+      withOption(jobconf).
+      withOption(cmdenv).
+      withOption(cacheFile).
+      withOption(cacheArchive).
+      withOption(verbose).
+      withOption(info).
+      withOption(debug).
+      withOption(inputtagged).
+      withOption(help).
+      create();
     parser.setGroup(allOptions);
     
   }
@@ -478,7 +478,7 @@ public class StreamJob {
     System.out.println("  the key part ends at first TAB, the rest of the line is the value");
     System.out.println("Custom Map input format: -inputreader package.MyRecordReader,n=v,n=v ");
     System.out
-        .println("  comma-separated name-values can be specified to configure the InputFormat");
+      .println("  comma-separated name-values can be specified to configure the InputFormat");
     System.out.println("  Ex: -inputreader 'StreamXmlRecordReader,begin=<doc>,end=</doc>'");
     System.out.println("Map output format, reduce input/output format:");
     System.out.println("  Format defined by what the mapper command outputs. Line-oriented");
@@ -495,9 +495,9 @@ public class StreamJob {
     System.out.println("To skip the sort/combine/shuffle/sort/reduce step:");
     System.out.println("  Use -reducer " + REDUCE_NONE);
     System.out
-        .println("  A Task's Map output then becomes a 'side-effect output' rather than a reduce input");
+      .println("  A Task's Map output then becomes a 'side-effect output' rather than a reduce input");
     System.out
-        .println("  This speeds up processing, This also feels more like \"in-place\" processing");
+      .println("  This speeds up processing, This also feels more like \"in-place\" processing");
     System.out.println("  because the input filename and the map input order are preserved");
     System.out.println("To specify a single side-effect output file");
     System.out.println("    -mapsideoutput [file:/C:/win|file:/unix/|socket://host:port]");//-output for side-effects will be soon deprecated
@@ -513,7 +513,7 @@ public class StreamJob {
     System.out.println("  -jobconf mapred.job.name='My Job' ");
     System.out.println("To specify that line-oriented input is in gzip format:");
     System.out
-        .println("(at this time ALL input files must be gzipped and this is not recognized based on file extension)");
+      .println("(at this time ALL input files must be gzipped and this is not recognized based on file extension)");
     System.out.println("   -jobconf stream.recordreader.compression=gzip ");
     System.out.println("To change the local temp directory:");
     System.out.println("  -jobconf dfs.data.dir=/tmp/dfs");
@@ -525,7 +525,7 @@ public class StreamJob {
     System.out.println("Use a custom hadoopStreaming build along a standard hadoop install:");
     System.out.println("  $HADOOP_HOME/bin/hadoop jar /path/my-hadoop-streaming.jar [...]\\");
     System.out
-        .println("    [...] -jobconf stream.shipped.hadoopstreaming=/path/my-hadoop-streaming.jar");
+      .println("    [...] -jobconf stream.shipped.hadoopstreaming=/path/my-hadoop-streaming.jar");
     System.out.println("For more details about jobconf parameters see:");
     System.out.println("  http://wiki.apache.org/lucene-hadoop/JobConfFile");
     System.out.println("To set an environement variable in a streaming command:");
@@ -533,7 +533,7 @@ public class StreamJob {
     System.out.println();
     System.out.println("Shortcut:");
     System.out
-        .println("   setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar\"");
+      .println("   setenv HSTREAMING \"$HADOOP_HOME/bin/hadoop jar $HADOOP_HOME/hadoop-streaming.jar\"");
     System.out.println();
     System.out.println("Example: $HSTREAMING -mapper \"/usr/local/bin/perl5 filter.pl\"");
     System.out.println("           -file /local/filter.pl -input \"/logs/0604*/*\" [...]");
@@ -619,7 +619,7 @@ public class StreamJob {
     // tmpDir=null means OS default tmp dir
     File jobJar = File.createTempFile("streamjob", ".jar", tmpDir);
     System.out.println("packageJobJar: " + packageFiles_ + " " + unjarFiles + " " + jobJar
-        + " tmpDir=" + tmpDir);
+                       + " tmpDir=" + tmpDir);
     if (debug_ == 0) {
       jobJar.deleteOnExit();
     }
@@ -709,14 +709,14 @@ public class StreamJob {
               .compareToIgnoreCase("org.apache.hadoop.mapred.KeyValueTextInputFormat") == 0)) {
         fmt = KeyValueTextInputFormat.class;
       } else if ((inputFormatSpec_
-          .compareToIgnoreCase("SequenceFileInputFormat") == 0)
-          || (inputFormatSpec_
-              .compareToIgnoreCase("org.apache.hadoop.mapred.SequenceFileInputFormat") == 0)) {
+                  .compareToIgnoreCase("SequenceFileInputFormat") == 0)
+                 || (inputFormatSpec_
+                     .compareToIgnoreCase("org.apache.hadoop.mapred.SequenceFileInputFormat") == 0)) {
         fmt = SequenceFileInputFormat.class;
       } else if ((inputFormatSpec_
-          .compareToIgnoreCase("SequenceFileToLineInputFormat") == 0)
-          || (inputFormatSpec_
-              .compareToIgnoreCase("org.apache.hadoop.mapred.SequenceFileToLineInputFormat") == 0)) {
+                  .compareToIgnoreCase("SequenceFileToLineInputFormat") == 0)
+                 || (inputFormatSpec_
+                     .compareToIgnoreCase("org.apache.hadoop.mapred.SequenceFileToLineInputFormat") == 0)) {
         fmt = SequenceFileAsTextInputFormat.class;
       } else {
         c = StreamUtil.goodClassOrNull(inputFormatSpec_, defaultPackage);
@@ -955,7 +955,7 @@ public class StreamJob {
       String hp = getJobTrackerHostPort();
       LOG.info("To kill this job, run:");
       LOG.info(getHadoopClientHome() + "/bin/hadoop job  -Dmapred.job.tracker=" + hp + " -kill "
-          + jobId_);
+               + jobId_);
       //LOG.info("Job file: " + running_.getJobFile() );
       LOG.info("Tracking URL: " + StreamUtil.qualifyHost(running_.getTrackingURL()));
     }
@@ -991,7 +991,7 @@ public class StreamJob {
         running_ = jc_.getJob(jobId_);
         String report = null;
         report = " map " + Math.round(running_.mapProgress() * 100) + "%  reduce "
-            + Math.round(running_.reduceProgress() * 100) + "%";
+          + Math.round(running_.reduceProgress() * 100) + "%";
 
         if (!report.equals(lastReport)) {
           LOG.info(report);
@@ -1006,16 +1006,16 @@ public class StreamJob {
       LOG.info("Output: " + output_);
       error = false;
     } catch(FileNotFoundException fe){
-        LOG.error("Error launching job , bad input path : " + fe.getMessage());
-      }catch(InvalidJobConfException je){
-        LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
-      }catch(FileAlreadyExistsException fae){
-        LOG.error("Error launching job , Output path already exists : " 
-            + fae.getMessage());
-      }catch( IOException ioe){
-        LOG.error("Error Launching job : " + ioe.getMessage());
-      }
-      finally {
+      LOG.error("Error launching job , bad input path : " + fe.getMessage());
+    }catch(InvalidJobConfException je){
+      LOG.error("Error launching job , Invalid job conf : " + je.getMessage());
+    }catch(FileAlreadyExistsException fae){
+      LOG.error("Error launching job , Output path already exists : " 
+                + fae.getMessage());
+    }catch( IOException ioe){
+      LOG.error("Error Launching job : " + ioe.getMessage());
+    }
+    finally {
       if (error && (running_ != null)) {
         LOG.info("killJob...");
         running_.killJob();
@@ -1031,25 +1031,25 @@ public class StreamJob {
     }
     
     MultiPropertyOption(final String optionString,
-        final String description,
-        final int id){
+                        final String description,
+                        final int id){
       super(optionString, description, id) ; 
       this.optionString = optionString;
     }
 
     public boolean canProcess(final WriteableCommandLine commandLine,
-        final String argument) {
-        boolean ret = (argument != null) && argument.startsWith(optionString);
+                              final String argument) {
+      boolean ret = (argument != null) && argument.startsWith(optionString);
         
-        return ret;
+      return ret;
     }    
     public void process(final WriteableCommandLine commandLine,
-        final ListIterator arguments) throws OptionException {
+                        final ListIterator arguments) throws OptionException {
       final String arg = (String) arguments.next();
 
       if (!canProcess(commandLine, arg)) {
-          throw new OptionException(this, 
-              ResourceConstants.UNEXPECTED_TOKEN, arg);
+        throw new OptionException(this, 
+                                  ResourceConstants.UNEXPECTED_TOKEN, arg);
       }
       
       ArrayList properties = new ArrayList(); 
@@ -1127,5 +1127,5 @@ public class StreamJob {
   protected RunningJob running_;
   protected String jobId_;
   protected static String LINK_URI = "You need to specify the uris as hdfs://host:port/#linkname," +
-      "Please specify a different link name for all of your caching URIs";
+    "Please specify a different link name for all of your caching URIs";
 }
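
For readers skimming this reindentation diff: the options wired up above (-input, -output, -mapper, -reducer, -file, -jobconf, ...) are the flags a streaming invocation passes on the command line. A minimal sketch of such an argument list, using only values quoted in the built-in usage text above (the wrapper class and the println loop are illustrative, not part of the commit):

public class StreamJobArgsSketch {
  public static void main(String[] ignored) {
    // Flags come from the createOption(...) calls above; values are the ones
    // shown in the help text. This sketch only echoes the argument list.
    String[] streamArgs = {
      "-input",   "/logs/0604*/*",
      "-output",  "/my.output",
      "-mapper",  "/usr/local/bin/perl5 filter.pl",
      "-file",    "/local/filter.pl",
      "-reducer", "NONE",                 // map output becomes a side-effect output
      "-jobconf", "stream.recordreader.compression=gzip"
    };
    for (String arg : streamArgs) {
      System.out.println(arg);
    }
  }
}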

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamLineRecordReader.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.mapred.FileSplit;
 public class StreamLineRecordReader extends KeyValueLineRecordReader {
 
   public StreamLineRecordReader(Configuration job, FileSplit split)
-      throws IOException {
+    throws IOException {
     super(job, split);
   }
 }

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamSequenceRecordReader.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.mapred.SequenceFileRecordReader;
 public class StreamSequenceRecordReader extends SequenceFileRecordReader {
 
   public StreamSequenceRecordReader(Configuration conf, FileSplit split)
-      throws IOException {
+    throws IOException {
     super(conf, split);
   }
 }

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java

@@ -90,7 +90,7 @@ public class StreamUtil {
         int pos = codePath.lastIndexOf(relPath);
         if (pos == -1) {
           throw new IllegalArgumentException("invalid codePath: className=" + className
-              + " codePath=" + codePath);
+                                             + " codePath=" + codePath);
         }
         codePath = codePath.substring(0, pos);
       }

+ 12 - 12
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java

@@ -47,7 +47,7 @@ import org.apache.hadoop.mapred.JobConf;
 public class StreamXmlRecordReader extends StreamBaseRecordReader {
 
   public StreamXmlRecordReader(FSDataInputStream in, FileSplit split, Reporter reporter,
-      JobConf job, FileSystem fs) throws IOException {
+                               JobConf job, FileSystem fs) throws IOException {
     super(in, split, reporter, job, fs);
 
     beginMark_ = checkJobGet(CONF_NS + "begin");
@@ -67,8 +67,8 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
 
   public void init() throws IOException {
     LOG.info("StreamBaseRecordReader.init: " + " start_=" + start_ + " end_=" + end_ + " length_="
-        + length_ + " start_ > in_.getPos() =" + (start_ > in_.getPos()) + " " + start_ + " > "
-        + in_.getPos());
+             + length_ + " start_ > in_.getPos() =" + (start_ > in_.getPos()) + " " + start_ + " > "
+             + in_.getPos());
     if (start_ > in_.getPos()) {
       in_.seek(start_);
     }
@@ -102,9 +102,9 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
     ((Text) value).set("");
 
     /*if(numNext < 5) {
-     System.out.println("@@@ " + numNext + ". true next k=|" + key.toString().replaceAll("[\\r\\n]", " ")
-     + "|, len=" + buf.length() + " v=|" + value.toString().replaceAll("[\\r\\n]", " ") + "|");
-     }*/
+      System.out.println("@@@ " + numNext + ". true next k=|" + key.toString().replaceAll("[\\r\\n]", " ")
+      + "|, len=" + buf.length() + " v=|" + value.toString().replaceAll("[\\r\\n]", " ") + "|");
+      }*/
 
     return true;
   }
@@ -130,7 +130,7 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
   }
 
   private boolean slowReadUntilMatch(Pattern markPattern, boolean includePat,
-      DataOutputBuffer outBufOrNull) throws IOException {
+                                     DataOutputBuffer outBufOrNull) throws IOException {
     try {
       long inStart = in_.getPos();
       byte[] buf = new byte[Math.max(lookAhead_, maxRecSize_)];
@@ -168,10 +168,10 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
         }
         state = nextState(state, input, match.start());
         /*System.out.println("@@@" +
-         s + ". Match " + match.start() + " " + match.groupCount() +
-         " state=" + state + " input=" + input + 
-         " firstMatchStart_=" + firstMatchStart_ + " startinstream=" + (inStart+firstMatchStart_) + 
-         " match=" + match.group(0) + " in=" + in_.getPos());*/
+          s + ". Match " + match.start() + " " + match.groupCount() +
+          " state=" + state + " input=" + input + 
+          " firstMatchStart_=" + firstMatchStart_ + " startinstream=" + (inStart+firstMatchStart_) + 
+          " match=" + match.group(0) + " in=" + in_.getPos());*/
         if (state == RECORD_ACCEPT) {
           break;
         }
@@ -230,7 +230,7 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
       case RECORD_MAYBE:
         return (state == CDATA_UNK) ? CDATA_UNK : RECORD_ACCEPT;
       }
-    break;
+      break;
     case CDATA_IN:
       return (input == CDATA_END) ? CDATA_OUT : CDATA_IN;
     }

+ 105 - 105
src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java

@@ -30,124 +30,124 @@ import org.apache.hadoop.io.Text;
  */
 
 public class UTF8ByteArrayUtils {
-    /**
-     * Find the first occured tab in a UTF-8 encoded string
-     * @param utf a byte array containing a UTF-8 encoded string
-     * @param start starting offset
-     * @param length no. of bytes
-     * @return position that first tab occures otherwise -1
-     */
-    public static int findTab(byte [] utf, int start, int length) {
-        for(int i=start; i<(start+length); i++) {
-            if(utf[i]==(byte)'\t') {
-                return i;
-            }
-        }
-        return -1;      
+  /**
+   * Find the first occured tab in a UTF-8 encoded string
+   * @param utf a byte array containing a UTF-8 encoded string
+   * @param start starting offset
+   * @param length no. of bytes
+   * @return position that first tab occures otherwise -1
+   */
+  public static int findTab(byte [] utf, int start, int length) {
+    for(int i=start; i<(start+length); i++) {
+      if(utf[i]==(byte)'\t') {
+        return i;
+      }
     }
+    return -1;      
+  }
     
-    /**
-     * Find the first occured tab in a UTF-8 encoded string
-     * @param utf a byte array containing a UTF-8 encoded string
-     * @return position that first tab occures otherwise -1
-     */
-    public static int findTab(byte [] utf) {
-      return findTab(utf, 0, utf.length);
-    }
+  /**
+   * Find the first occured tab in a UTF-8 encoded string
+   * @param utf a byte array containing a UTF-8 encoded string
+   * @return position that first tab occures otherwise -1
+   */
+  public static int findTab(byte [] utf) {
+    return findTab(utf, 0, utf.length);
+  }
 
-    /**
-     * split a UTF-8 byte array into key and value 
-     * assuming that the delimilator is at splitpos. 
-     * @param utf utf-8 encoded string
-     * @param start starting offset
-     * @param length no. of bytes
-     * @param key contains key upon the method is returned
-     * @param val contains value upon the method is returned
-     * @param splitPos the split pos
-     * @throws IOException
-     */
-    public static void splitKeyVal(byte[] utf, int start, int length, 
-            Text key, Text val, int splitPos) throws IOException {
-        if(splitPos<start || splitPos >= (start+length))
-            throw new IllegalArgumentException( "splitPos must be in the range " +
-                "[" + start + ", " + (start+length) + "]: " + splitPos);
-        int keyLen = (splitPos-start);
-        byte [] keyBytes = new byte[keyLen];
-        System.arraycopy(utf, start, keyBytes, 0, keyLen);
-        int valLen = (start+length)-splitPos-1;
-        byte [] valBytes = new byte[valLen];
-        System.arraycopy(utf, splitPos+1, valBytes, 0, valLen);
-        key.set(keyBytes);
-        val.set(valBytes);
-    }
+  /**
+   * split a UTF-8 byte array into key and value 
+   * assuming that the delimilator is at splitpos. 
+   * @param utf utf-8 encoded string
+   * @param start starting offset
+   * @param length no. of bytes
+   * @param key contains key upon the method is returned
+   * @param val contains value upon the method is returned
+   * @param splitPos the split pos
+   * @throws IOException
+   */
+  public static void splitKeyVal(byte[] utf, int start, int length, 
+                                 Text key, Text val, int splitPos) throws IOException {
+    if(splitPos<start || splitPos >= (start+length))
+      throw new IllegalArgumentException( "splitPos must be in the range " +
+                                          "[" + start + ", " + (start+length) + "]: " + splitPos);
+    int keyLen = (splitPos-start);
+    byte [] keyBytes = new byte[keyLen];
+    System.arraycopy(utf, start, keyBytes, 0, keyLen);
+    int valLen = (start+length)-splitPos-1;
+    byte [] valBytes = new byte[valLen];
+    System.arraycopy(utf, splitPos+1, valBytes, 0, valLen);
+    key.set(keyBytes);
+    val.set(valBytes);
+  }
     
 
-    /**
-     * split a UTF-8 byte array into key and value 
-     * assuming that the delimilator is at splitpos. 
-     * @param utf utf-8 encoded string
-     * @param key contains key upon the method is returned
-     * @param val contains value upon the method is returned
-     * @param splitPos the split pos
-     * @throws IOException
-     */
-    public static void splitKeyVal(byte[] utf, Text key, Text val, int splitPos) 
+  /**
+   * split a UTF-8 byte array into key and value 
+   * assuming that the delimilator is at splitpos. 
+   * @param utf utf-8 encoded string
+   * @param key contains key upon the method is returned
+   * @param val contains value upon the method is returned
+   * @param splitPos the split pos
+   * @throws IOException
+   */
+  public static void splitKeyVal(byte[] utf, Text key, Text val, int splitPos) 
     throws IOException {
-        splitKeyVal(utf, 0, utf.length, key, val, splitPos);
-    }
+    splitKeyVal(utf, 0, utf.length, key, val, splitPos);
+  }
     
-    /**
-     * Read a utf8 encoded line from a data input stream. 
-     * @param in data input stream
-     * @return a byte array containing the line 
-     * @throws IOException
-     */
-    public static byte[] readLine(InputStream in) throws IOException {
-      byte [] buf = new byte[128];
-      byte [] lineBuffer = buf;
-      int room = 128;
-      int offset = 0;
-      boolean isEOF = false;
-      while (true) {
-        int b = in.read();
-        if (b == -1) {
-          isEOF = true;
-          break;
-        }
+  /**
+   * Read a utf8 encoded line from a data input stream. 
+   * @param in data input stream
+   * @return a byte array containing the line 
+   * @throws IOException
+   */
+  public static byte[] readLine(InputStream in) throws IOException {
+    byte [] buf = new byte[128];
+    byte [] lineBuffer = buf;
+    int room = 128;
+    int offset = 0;
+    boolean isEOF = false;
+    while (true) {
+      int b = in.read();
+      if (b == -1) {
+        isEOF = true;
+        break;
+      }
 
-        char c = (char)b;
-        if (c == '\n')
-          break;
+      char c = (char)b;
+      if (c == '\n')
+        break;
 
-        if (c == '\r') {
-          in.mark(1);
-          int c2 = in.read();
-          if(c2 == -1) {
-              isEOF = true;
-              break;
-          }
-          if (c2 != '\n') {
-            // push it back
-            in.reset();
-          }
+      if (c == '\r') {
+        in.mark(1);
+        int c2 = in.read();
+        if(c2 == -1) {
+          isEOF = true;
           break;
         }
-        
-        if (--room < 0) {
-            buf = new byte[offset + 128];
-            room = buf.length - offset - 1;
-            System.arraycopy(lineBuffer, 0, buf, 0, offset);
-            lineBuffer = buf;
+        if (c2 != '\n') {
+          // push it back
+          in.reset();
         }
-        buf[offset++] = (byte) c;
+        break;
       }
-
-      if(isEOF && offset==0) {
-          return null;
-      } else {
-          lineBuffer = new byte[offset];
-          System.arraycopy(buf, 0, lineBuffer, 0, offset);
-          return lineBuffer;
+        
+      if (--room < 0) {
+        buf = new byte[offset + 128];
+        room = buf.length - offset - 1;
+        System.arraycopy(lineBuffer, 0, buf, 0, offset);
+        lineBuffer = buf;
       }
+      buf[offset++] = (byte) c;
+    }
+
+    if(isEOF && offset==0) {
+      return null;
+    } else {
+      lineBuffer = new byte[offset];
+      System.arraycopy(buf, 0, lineBuffer, 0, offset);
+      return lineBuffer;
     }
+  }
 }
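
The javadoc above documents the helpers the streaming code uses to read a line and split it into key and value at the first tab. A minimal, self-contained usage sketch (the driver class and the sample line are illustrative; only readLine, findTab and splitKeyVal come from the file above):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.streaming.UTF8ByteArrayUtils;

public class KeyValSplitSketch {
  public static void main(String[] args) throws IOException {
    // One "key<TAB>value" line, as a streaming mapper would emit it.
    InputStream in = new ByteArrayInputStream("k1\tv1\n".getBytes("UTF-8"));
    byte[] line = UTF8ByteArrayUtils.readLine(in);   // bytes of the line, '\n' stripped
    Text key = new Text();
    Text val = new Text();
    int tab = UTF8ByteArrayUtils.findTab(line);      // -1 when the line has no tab
    if (tab >= 0) {
      UTF8ByteArrayUtils.splitKeyVal(line, key, val, tab);
    } else {
      key.set(line);                                 // no tab: whole line is the key
    }
    System.out.println(key + "\t" + val);
  }
}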

+ 7 - 7
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestGzipInput.java

@@ -36,7 +36,7 @@ public class TestGzipInput extends TestStreaming
   protected void createInput() throws IOException
   {
     GZIPOutputStream out = new GZIPOutputStream(
-        new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
+                                                new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
     out.write(input.getBytes("UTF-8"));
     out.close();
   }
@@ -44,12 +44,12 @@ public class TestGzipInput extends TestStreaming
 
   protected String[] genArgs() {
     return new String[] {
-        "-input", INPUT_FILE.getAbsolutePath(),
-        "-output", OUTPUT_DIR.getAbsolutePath(),
-        "-mapper", map,
-        "-combiner", combine,
-        "-reducer", reduce,
-        "-jobconf", "stream.recordreader.compression=gzip"
+      "-input", INPUT_FILE.getAbsolutePath(),
+      "-output", OUTPUT_DIR.getAbsolutePath(),
+      "-mapper", map,
+      "-combiner", combine,
+      "-reducer", reduce,
+      "-jobconf", "stream.recordreader.compression=gzip"
     };
     
   }

+ 34 - 34
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java

@@ -83,12 +83,12 @@ public class TestStreamedMerge extends TestCase {
     // keys are compared as Strings and ties are broken by stream index
     // For example (k1; stream 2) < (k1; stream 3)
     String expect = i18n(
-        unt(">1\tk1\tv1\n", tag) + 
-        unt(">2\tk1\tv2\n", tag) + 
-        unt(">3\tk1\tv3\n", tag) + 
-        unt(">2\tk2\tv4\n", tag) +
-        unt(">1\tk3\tv5\n", tag)
-    );
+                         unt(">1\tk1\tv1\n", tag) + 
+                         unt(">2\tk1\tv2\n", tag) + 
+                         unt(">3\tk1\tv3\n", tag) + 
+                         unt(">2\tk2\tv4\n", tag) +
+                         unt(">1\tk3\tv5\n", tag)
+                         );
     return expect;
   }
   
@@ -128,18 +128,18 @@ public class TestStreamedMerge extends TestCase {
 
   void callStreaming(String argSideOutput, boolean inputTagged) throws IOException {
     String[] testargs = new String[] {
-        //"-jobconf", "stream.debug=1",
-        "-verbose", 
-        "-jobconf", "stream.testmerge=1", 
-        "-input", "+/input/part-00 | /input/part-01 | /input/part-02", 
-        "-mapper", StreamUtil.localizeBin("/bin/cat"), 
-        "-reducer", "NONE", 
-        "-output", "/my.output",
-        "-mapsideoutput", argSideOutput, 
-        "-dfs", conf_.get("fs.default.name"), 
-        "-jt", "local",
-        "-jobconf", "stream.sideoutput.localfs=true", 
-        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
+      //"-jobconf", "stream.debug=1",
+      "-verbose", 
+      "-jobconf", "stream.testmerge=1", 
+      "-input", "+/input/part-00 | /input/part-01 | /input/part-02", 
+      "-mapper", StreamUtil.localizeBin("/bin/cat"), 
+      "-reducer", "NONE", 
+      "-output", "/my.output",
+      "-mapsideoutput", argSideOutput, 
+      "-dfs", conf_.get("fs.default.name"), 
+      "-jt", "local",
+      "-jobconf", "stream.sideoutput.localfs=true", 
+      "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
     };
     ArrayList argList = new ArrayList();
     argList.addAll(Arrays.asList(testargs));
@@ -156,23 +156,23 @@ public class TestStreamedMerge extends TestCase {
 
   SideEffectConsumer startSideEffectConsumer(StringBuffer outBuf) {
     SideEffectConsumer t = new SideEffectConsumer(outBuf) {
-      ServerSocket listen;
-      Socket client;
-      InputStream in;
+        ServerSocket listen;
+        Socket client;
+        InputStream in;
       
-      InputStream connectInputStream() throws IOException {
-        listen = new ServerSocket(SOC_PORT);
-        client = listen.accept();
-        in = client.getInputStream();
-        return in;
-      }
+        InputStream connectInputStream() throws IOException {
+          listen = new ServerSocket(SOC_PORT);
+          client = listen.accept();
+          in = client.getInputStream();
+          return in;
+        }
       
-      void close() throws IOException
-      {
-        listen.close();
-        System.out.println("@@@listen closed");
-      }
-    };
+        void close() throws IOException
+        {
+          listen.close();
+          System.out.println("@@@listen closed");
+        }
+      };
     t.start();
     return t;
   }
@@ -264,7 +264,7 @@ public class TestStreamedMerge extends TestCase {
       sideOutput = "socket://localhost:" + SOC_PORT + "/";
     } else {
       String userOut = StreamUtil.getBoundAntProperty(
-          "hadoop.test.localoutputfile", null);
+                                                      "hadoop.test.localoutputfile", null);
       if(userOut != null) {
         f = new File(userOut);
         // don't delete so they can mkfifo

+ 12 - 12
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreaming.java

@@ -55,30 +55,30 @@ public class TestStreaming extends TestCase
   protected void createInput() throws IOException
   {
     DataOutputStream out = new DataOutputStream(
-        new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
+                                                new FileOutputStream(INPUT_FILE.getAbsoluteFile()));
     out.write(input.getBytes("UTF-8"));
     out.close();
   }
 
   protected String[] genArgs() {
     return new String[] {
-        "-input", INPUT_FILE.getAbsolutePath(),
-        "-output", OUTPUT_DIR.getAbsolutePath(),
-        "-mapper", map,
-        "-combiner", combine,
-        "-reducer", reduce,
-        //"-verbose",
-        //"-jobconf", "stream.debug=set"
-        "-jobconf", "keep.failed.task.files=true",
-        "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
-        };
+      "-input", INPUT_FILE.getAbsolutePath(),
+      "-output", OUTPUT_DIR.getAbsolutePath(),
+      "-mapper", map,
+      "-combiner", combine,
+      "-reducer", reduce,
+      //"-verbose",
+      //"-jobconf", "stream.debug=set"
+      "-jobconf", "keep.failed.task.files=true",
+      "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp")
+    };
   }
   
   public void testCommandLine()
   {
     try {
       try {
-         OUTPUT_DIR.getAbsoluteFile().delete();
+        OUTPUT_DIR.getAbsoluteFile().delete();
       } catch (Exception e) {
       }
 

+ 10 - 10
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java

@@ -66,16 +66,16 @@ public class TestSymLink extends TestCase
         String strJobtracker = "mapred.job.tracker=" + "localhost:" + mr.getJobTrackerPort();
         String strNamenode = "fs.default.name=" + namenode;
         String argv[] = new String[] {
-            "-input", INPUT_FILE,
-            "-output", OUTPUT_DIR,
-            "-mapper", map,
-            "-reducer", reduce,
-            //"-verbose",
-            //"-jobconf", "stream.debug=set"
-            "-jobconf", strNamenode,
-            "-jobconf", strJobtracker,
-            "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
-            "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
+          "-input", INPUT_FILE,
+          "-output", OUTPUT_DIR,
+          "-mapper", map,
+          "-reducer", reduce,
+          //"-verbose",
+          //"-jobconf", "stream.debug=set"
+          "-jobconf", strNamenode,
+          "-jobconf", strJobtracker,
+          "-jobconf", "stream.tmpdir="+System.getProperty("test.build.data","/tmp"),
+          "-cacheFile", "hdfs://"+fileSys.getName()+CACHE_FILE + "#testlink"
         };
 
         fileSys.delete(new Path(OUTPUT_DIR));

+ 16 - 16
src/examples/org/apache/hadoop/examples/ExampleDriver.java

@@ -26,22 +26,22 @@ import org.apache.hadoop.util.ProgramDriver;
  */
 public class ExampleDriver {
   
-    public static void main(String argv[]){
-        ProgramDriver pgd = new ProgramDriver();
-        try {
-	    pgd.addClass("wordcount", WordCount.class, 
-			 "A map/reduce program that counts the words in the input files.");
-	    pgd.addClass("grep", Grep.class, 
-			 "A map/reduce program that counts the matches of a regex in the input.");
-	    pgd.addClass("randomwriter", RandomWriter.class, 
-                        "A map/reduce program that writes 10GB of random data per node.");
-            pgd.addClass("sort", Sort.class, "A map/reduce program that sorts the data written by the random writer.");
-            pgd.addClass("pi", PiEstimator.class, "A map/reduce program that estimates Pi using monte-carlo method.");
-            pgd.driver(argv);
-	}
-	catch(Throwable e){
-	    e.printStackTrace();
-	}
+  public static void main(String argv[]){
+    ProgramDriver pgd = new ProgramDriver();
+    try {
+      pgd.addClass("wordcount", WordCount.class, 
+                   "A map/reduce program that counts the words in the input files.");
+      pgd.addClass("grep", Grep.class, 
+                   "A map/reduce program that counts the matches of a regex in the input.");
+      pgd.addClass("randomwriter", RandomWriter.class, 
+                   "A map/reduce program that writes 10GB of random data per node.");
+      pgd.addClass("sort", Sort.class, "A map/reduce program that sorts the data written by the random writer.");
+      pgd.addClass("pi", PiEstimator.class, "A map/reduce program that estimates Pi using monte-carlo method.");
+      pgd.driver(argv);
     }
+    catch(Throwable e){
+      e.printStackTrace();
+    }
+  }
 }
 	

+ 76 - 76
src/examples/org/apache/hadoop/examples/PiEstimator.java

@@ -63,25 +63,25 @@ public class PiEstimator {
      * @param reporter
      */
     public void map(WritableComparable key,
-            Writable val,
-            OutputCollector out,
-            Reporter reporter) throws IOException {
-        long nSamples = ((LongWritable) key).get();
-        for(long idx = 0; idx < nSamples; idx++) {
-            double x = r.nextDouble();
-            double y = r.nextDouble();
-            double d = (x-0.5)*(x-0.5)+(y-0.5)*(y-0.5);
-            if (d > 0.25) {
-                numOutside++;
-            } else {
-                numInside++;
-            }
-            if (idx%1000 == 1) {
-                reporter.setStatus("Generated "+idx+" samples.");
-            }
+                    Writable val,
+                    OutputCollector out,
+                    Reporter reporter) throws IOException {
+      long nSamples = ((LongWritable) key).get();
+      for(long idx = 0; idx < nSamples; idx++) {
+        double x = r.nextDouble();
+        double y = r.nextDouble();
+        double d = (x-0.5)*(x-0.5)+(y-0.5)*(y-0.5);
+        if (d > 0.25) {
+          numOutside++;
+        } else {
+          numInside++;
         }
-        out.collect(new LongWritable(0), new LongWritable(numOutside));
-        out.collect(new LongWritable(1), new LongWritable(numInside));
+        if (idx%1000 == 1) {
+          reporter.setStatus("Generated "+idx+" samples.");
+        }
+      }
+      out.collect(new LongWritable(0), new LongWritable(numOutside));
+      out.collect(new LongWritable(1), new LongWritable(numInside));
     }
     
     public void close() {
@@ -90,50 +90,50 @@ public class PiEstimator {
   }
   
   public static class PiReducer extends MapReduceBase implements Reducer {
-      long numInside = 0;
-      long numOutside = 0;
-      JobConf conf;
+    long numInside = 0;
+    long numOutside = 0;
+    JobConf conf;
       
-      /** Reducer configuration.
-       *
-       */
-      public void configure(JobConf job) {
-          conf = job;
-      }
-      /** Reduce method.
-       * @param key
-       * @param values
-       * @param output
-       * @param reporter
-       */
-      public void reduce(WritableComparable key,
-              Iterator values,
-              OutputCollector output,
-              Reporter reporter) throws IOException {
-          if (((LongWritable)key).get() == 1) {
-              while (values.hasNext()) {
-                  long num = ((LongWritable)values.next()).get();
-                  numInside += num;
-              }
-          } else {
-              while (values.hasNext()) {
-                  long num = ((LongWritable)values.next()).get();
-                  numOutside += num;
-              }
-          }
+    /** Reducer configuration.
+     *
+     */
+    public void configure(JobConf job) {
+      conf = job;
+    }
+    /** Reduce method.
+     * @param key
+     * @param values
+     * @param output
+     * @param reporter
+     */
+    public void reduce(WritableComparable key,
+                       Iterator values,
+                       OutputCollector output,
+                       Reporter reporter) throws IOException {
+      if (((LongWritable)key).get() == 1) {
+        while (values.hasNext()) {
+          long num = ((LongWritable)values.next()).get();
+          numInside += num;
+        }
+      } else {
+        while (values.hasNext()) {
+          long num = ((LongWritable)values.next()).get();
+          numOutside += num;
+        }
       }
+    }
       
-      public void close() throws IOException {
-        Path tmpDir = new Path("test-mini-mr");
-        Path outDir = new Path(tmpDir, "out");
-        Path outFile = new Path(outDir, "reduce-out");
-        FileSystem fileSys = FileSystem.get(conf);
-        SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, 
-            outFile, LongWritable.class, LongWritable.class, 
-            CompressionType.NONE);
-        writer.append(new LongWritable(numInside), new LongWritable(numOutside));
-        writer.close();
-      }
+    public void close() throws IOException {
+      Path tmpDir = new Path("test-mini-mr");
+      Path outDir = new Path(tmpDir, "out");
+      Path outFile = new Path(outDir, "reduce-out");
+      FileSystem fileSys = FileSystem.get(conf);
+      SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, conf, 
+                                                             outFile, LongWritable.class, LongWritable.class, 
+                                                             CompressionType.NONE);
+      writer.append(new LongWritable(numInside), new LongWritable(numOutside));
+      writer.close();
+    }
   }
 
   /**
@@ -141,7 +141,7 @@ public class PiEstimator {
    * monte-carlo method.
    */
   static double launch(int numMaps, long numPoints, String jt, String dfs)
-  throws IOException {
+    throws IOException {
 
     Configuration conf = new Configuration();
     JobConf jobConf = new JobConf(conf, PiEstimator.class);
@@ -180,7 +180,7 @@ public class PiEstimator {
     for(int idx=0; idx < numMaps; ++idx) {
       Path file = new Path(inDir, "part"+idx);
       SequenceFile.Writer writer = SequenceFile.createWriter(fileSys, jobConf, 
-          file, LongWritable.class, LongWritable.class, CompressionType.NONE);
+                                                             file, LongWritable.class, LongWritable.class, CompressionType.NONE);
       writer.append(new LongWritable(numPoints), new LongWritable(0));
       writer.close();
       System.out.println("Wrote input for Map #"+idx);
@@ -193,10 +193,10 @@ public class PiEstimator {
       long startTime = System.currentTimeMillis();
       JobClient.runJob(jobConf);
       System.out.println("Job Finished in "+
-              (double)(System.currentTimeMillis() - startTime)/1000.0 + " seconds");
+                         (double)(System.currentTimeMillis() - startTime)/1000.0 + " seconds");
       Path inFile = new Path(outDir, "reduce-out");
       SequenceFile.Reader reader = new SequenceFile.Reader(fileSys, inFile,
-              jobConf);
+                                                           jobConf);
       LongWritable numInside = new LongWritable();
       LongWritable numOutside = new LongWritable();
       reader.next(numInside, numOutside);
@@ -210,20 +210,20 @@ public class PiEstimator {
   }
   
   /**
-     * Launches all the tasks in order.
-     */
-    public static void main(String[] argv) throws Exception {
-        if (argv.length < 2) {
-            System.err.println("Usage: TestMiniMR <nMaps> <nSamples>");
-            return;
-        }
+   * Launches all the tasks in order.
+   */
+  public static void main(String[] argv) throws Exception {
+    if (argv.length < 2) {
+      System.err.println("Usage: TestMiniMR <nMaps> <nSamples>");
+      return;
+    }
 
-        int nMaps = Integer.parseInt(argv[0]);
-        long nSamples = Long.parseLong(argv[1]);
+    int nMaps = Integer.parseInt(argv[0]);
+    long nSamples = Long.parseLong(argv[1]);
         
-        System.out.println("Number of Maps = "+nMaps+" Samples per Map = "+nSamples);
+    System.out.println("Number of Maps = "+nMaps+" Samples per Map = "+nSamples);
         
-	System.out.println("Estimated value of PI is "+
-                launch(nMaps, nSamples, null, null));
-    }
+    System.out.println("Estimated value of PI is "+
+                       launch(nMaps, nSamples, null, null));
+  }
 }
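
The PiMapper/PiReducer code above only samples points in the unit square and tallies how many fall inside the circle of radius 0.5 centred at (0.5, 0.5); the final estimate, computed from those counts elsewhere in PiEstimator (outside these hunks), is the usual Monte Carlo ratio. A small sketch with made-up counts:

public class PiFromCountsSketch {
  public static void main(String[] args) {
    // Illustrative counts only; in the real job they come from the
    // reduce-out SequenceFile written by PiReducer.close() above.
    long numInside  = 785398L;
    long numOutside = 214602L;
    // The circle of radius 0.5 covers pi/4 of the unit square, so
    // pi ~= 4 * inside / total.
    double estimate = 4.0 * numInside / (numInside + numOutside);
    System.out.println("Estimated value of PI is " + estimate);
  }
}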

+ 7 - 7
src/examples/org/apache/hadoop/examples/RandomWriter.java

@@ -135,16 +135,16 @@ public class RandomWriter {
       String filename = ((Text) key).toString();
       SequenceFile.Writer writer = 
         SequenceFile.createWriter(fileSys, jobConf, new Path(filename), 
-                                BytesWritable.class, BytesWritable.class,
-                                CompressionType.NONE, reporter);
+                                  BytesWritable.class, BytesWritable.class,
+                                  CompressionType.NONE, reporter);
       int itemCount = 0;
       while (numBytesToWrite > 0) {
         int keyLength = minKeySize + 
-           (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
+          (keySizeRange != 0 ? random.nextInt(keySizeRange) : 0);
         randomKey.setSize(keyLength);
         randomizeBytes(randomKey.get(), 0, randomKey.getSize());
         int valueLength = minValueSize +
-           (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
+          (valueSizeRange != 0 ? random.nextInt(valueSizeRange) : 0);
         randomValue.setSize(valueLength);
         randomizeBytes(randomValue.get(), 0, randomValue.getSize());
         writer.append(randomKey, randomValue);
@@ -158,7 +158,7 @@ public class RandomWriter {
       }
       reporter.setStatus("done with " + itemCount + " records.");
       writer.close();
-     }
+    }
     
     /**
      * Save the values out of the configuaration that we need to write
@@ -172,7 +172,7 @@ public class RandomWriter {
         throw new RuntimeException("Can't get default file system", e);
       }
       numBytesToWrite = job.getLong("test.randomwrite.bytes_per_map",
-                                       1*1024*1024*1024);
+                                    1*1024*1024*1024);
       minKeySize = job.getInt("test.randomwrite.min_key", 10);
       keySizeRange = 
         job.getInt("test.randomwrite.max_key", 1000) - minKeySize;
@@ -220,7 +220,7 @@ public class RandomWriter {
     JobClient client = new JobClient(job);
     ClusterStatus cluster = client.getClusterStatus();
     int numMaps = cluster.getTaskTrackers() * 
-         job.getInt("test.randomwriter.maps_per_host", 10);
+      job.getInt("test.randomwriter.maps_per_host", 10);
     job.setNumMapTasks(numMaps);
     System.out.println("Running " + numMaps + " maps.");
     job.setNumReduceTasks(1);

+ 8 - 8
src/examples/org/apache/hadoop/examples/Sort.java

@@ -67,9 +67,9 @@ public class Sort {
     JobClient client = new JobClient(jobConf);
     ClusterStatus cluster = client.getClusterStatus();
     int num_maps = cluster.getTaskTrackers() * 
-         jobConf.getInt("test.sort.maps_per_host", 10);
+      jobConf.getInt("test.sort.maps_per_host", 10);
     int num_reduces = cluster.getTaskTrackers() * 
-        jobConf.getInt("test.sort.reduces_per_host", cluster.getMaxTasks());
+      jobConf.getInt("test.sort.reduces_per_host", cluster.getMaxTasks());
     List<String> otherArgs = new ArrayList<String>();
     for(int i=0; i < args.length; ++i) {
       try {
@@ -96,7 +96,7 @@ public class Sort {
     // Make sure there are exactly 2 parameters left.
     if (otherArgs.size() != 2) {
       System.out.println("ERROR: Wrong number of parameters: " +
-          otherArgs.size() + " instead of 2.");
+                         otherArgs.size() + " instead of 2.");
       printUsage();
     }
     jobConf.setInputPath(new Path((String) otherArgs.get(0)));
@@ -106,17 +106,17 @@ public class Sort {
     //job_conf.set("mapred.job.tracker", "local");
     
     System.out.println("Running on " +
-        cluster.getTaskTrackers() +
-        " nodes to sort from " + 
-        jobConf.getInputPaths()[0] + " into " +
-        jobConf.getOutputPath() + " with " + num_reduces + " reduces.");
+                       cluster.getTaskTrackers() +
+                       " nodes to sort from " + 
+                       jobConf.getInputPaths()[0] + " into " +
+                       jobConf.getOutputPath() + " with " + num_reduces + " reduces.");
     Date startTime = new Date();
     System.out.println("Job started: " + startTime);
     JobClient.runJob(jobConf);
     Date end_time = new Date();
     System.out.println("Job ended: " + end_time);
     System.out.println("The job took " + 
-       (end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
+                       (end_time.getTime() - startTime.getTime()) /1000 + " seconds.");
   }
   
 }

+ 5 - 5
src/examples/org/apache/hadoop/examples/WordCount.java

@@ -61,8 +61,8 @@ public class WordCount {
     private Text word = new Text();
     
     public void map(WritableComparable key, Writable value, 
-        OutputCollector output, 
-        Reporter reporter) throws IOException {
+                    OutputCollector output, 
+                    Reporter reporter) throws IOException {
       String line = ((Text)value).toString();
       StringTokenizer itr = new StringTokenizer(line);
       while (itr.hasMoreTokens()) {
@@ -79,8 +79,8 @@ public class WordCount {
   public static class Reduce extends MapReduceBase implements Reducer {
     
     public void reduce(WritableComparable key, Iterator values,
-        OutputCollector output, 
-        Reporter reporter) throws IOException {
+                       OutputCollector output, 
+                       Reporter reporter) throws IOException {
       int sum = 0;
       while (values.hasNext()) {
         sum += ((IntWritable) values.next()).get();
@@ -136,7 +136,7 @@ public class WordCount {
     // Make sure there are exactly 2 parameters left.
     if (other_args.size() != 2) {
       System.out.println("ERROR: Wrong number of parameters: " +
-          other_args.size() + " instead of 2.");
+                         other_args.size() + " instead of 2.");
       printUsage();
     }
     conf.setInputPath(new Path((String) other_args.get(0)));

+ 92 - 92
src/java/org/apache/hadoop/dfs/Block.java

@@ -29,112 +29,112 @@ import org.apache.hadoop.io.*;
  **************************************************/
 class Block implements Writable, Comparable {
 
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (Block.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new Block(); }
-         });
-    }
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (Block.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new Block(); }
+       });
+  }
 
-    /**
-     */
-    public static boolean isBlockFilename(File f) {
-        if (f.getName().startsWith("blk_")) {
-            return true;
-        } else {
-            return false;
-        }
+  /**
+   */
+  public static boolean isBlockFilename(File f) {
+    if (f.getName().startsWith("blk_")) {
+      return true;
+    } else {
+      return false;
     }
+  }
 
-    long blkid;
-    long len;
+  long blkid;
+  long len;
 
-    /**
-     */
-    public Block() {
-        this.blkid = 0;
-        this.len = 0;
-    }
+  /**
+   */
+  public Block() {
+    this.blkid = 0;
+    this.len = 0;
+  }
 
-    /**
-     */
-    public Block(long blkid, long len) {
-        this.blkid = blkid;
-        this.len = len;
-    }
+  /**
+   */
+  public Block(long blkid, long len) {
+    this.blkid = blkid;
+    this.len = len;
+  }
 
-    /**
-     * Find the blockid from the given filename
-     */
-    public Block(File f, long len) {
-        String name = f.getName();
-        name = name.substring("blk_".length());
-        this.blkid = Long.parseLong(name);
-        this.len = len;
-    }
+  /**
+   * Find the blockid from the given filename
+   */
+  public Block(File f, long len) {
+    String name = f.getName();
+    name = name.substring("blk_".length());
+    this.blkid = Long.parseLong(name);
+    this.len = len;
+  }
 
-    /**
-     */
-    public long getBlockId() {
-        return blkid;
-    }
+  /**
+   */
+  public long getBlockId() {
+    return blkid;
+  }
 
-    /**
-     */
-    public String getBlockName() {
-        return "blk_" + String.valueOf(blkid);
-    }
+  /**
+   */
+  public String getBlockName() {
+    return "blk_" + String.valueOf(blkid);
+  }
 
-    /**
-     */
-    public long getNumBytes() {
-        return len;
-    }
-    public void setNumBytes(long len) {
-        this.len = len;
-    }
+  /**
+   */
+  public long getNumBytes() {
+    return len;
+  }
+  public void setNumBytes(long len) {
+    this.len = len;
+  }
 
-    /**
-     */
-    public String toString() {
-        return getBlockName();
-    }
+  /**
+   */
+  public String toString() {
+    return getBlockName();
+  }
 
-    /////////////////////////////////////
-    // Writable
-    /////////////////////////////////////
-    public void write(DataOutput out) throws IOException {
-        out.writeLong(blkid);
-        out.writeLong(len);
-    }
+  /////////////////////////////////////
+  // Writable
+  /////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    out.writeLong(blkid);
+    out.writeLong(len);
+  }
 
-    public void readFields(DataInput in) throws IOException {
-        this.blkid = in.readLong();
-        this.len = in.readLong();
-        if( len < 0 ) {
-          throw new IOException("Unexpected block size: " + len);
-        }
+  public void readFields(DataInput in) throws IOException {
+    this.blkid = in.readLong();
+    this.len = in.readLong();
+    if( len < 0 ) {
+      throw new IOException("Unexpected block size: " + len);
     }
+  }
 
-    /////////////////////////////////////
-    // Comparable
-    /////////////////////////////////////
-    public int compareTo(Object o) {
-        Block b = (Block) o;
-        if ( blkid < b.blkid ) {
-            return -1;
-        } else if ( blkid == b.blkid ) {
-            return 0;
-        } else {
-            return 1;
-        }
-    }
-    public boolean equals(Object o) {
-        return blkid == ((Block)o).blkid;
+  /////////////////////////////////////
+  // Comparable
+  /////////////////////////////////////
+  public int compareTo(Object o) {
+    Block b = (Block) o;
+    if ( blkid < b.blkid ) {
+      return -1;
+    } else if ( blkid == b.blkid ) {
+      return 0;
+    } else {
+      return 1;
     }
+  }
+  public boolean equals(Object o) {
+    return blkid == ((Block)o).blkid;
+  }
     
-    public int hashCode() {
-        return 37 * 17 + (int) (blkid^(blkid>>>32));
-    }
+  public int hashCode() {
+    return 37 * 17 + (int) (blkid^(blkid>>>32));
+  }
 }

+ 1 - 1
src/java/org/apache/hadoop/dfs/BlockCommand.java

@@ -48,7 +48,7 @@ class DatanodeCommand implements Writable {
   
   public void readFields(DataInput in) throws IOException {
     this.action = (DatanodeProtocol.DataNodeAction)
-          WritableUtils.readEnum( in, DatanodeProtocol.DataNodeAction.class );
+      WritableUtils.readEnum( in, DatanodeProtocol.DataNodeAction.class );
   }
 }
 

+ 294 - 294
src/java/org/apache/hadoop/dfs/ClientProtocol.java

@@ -29,329 +29,329 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  **********************************************************************/
 interface ClientProtocol extends VersionedProtocol {
 
-    /*
-     * 11: metasave() added and reportWrittenBlock() removed.
-     */
-    public static final long versionID = 11L;  
+  /*
+   * 11: metasave() added and reportWrittenBlock() removed.
+   */
+  public static final long versionID = 11L;  
   
-    ///////////////////////////////////////
-    // File contents
-    ///////////////////////////////////////
-    /**
-     * Open an existing file, at the given name.  Returns block 
-     * and DataNode info.  DataNodes for each block are sorted by
-     * the distance to the client's address.
-     * The client will then have to contact
-     * each indicated DataNode to obtain the actual data.  There
-     * is no need to call close() or any other function after
-     * calling open().
-     */
-    public LocatedBlock[] open(String src) throws IOException;
+  ///////////////////////////////////////
+  // File contents
+  ///////////////////////////////////////
+  /**
+   * Open an existing file, at the given name.  Returns block 
+   * and DataNode info.  DataNodes for each block are sorted by
+   * the distance to the client's address.
+   * The client will then have to contact
+   * each indicated DataNode to obtain the actual data.  There
+   * is no need to call close() or any other function after
+   * calling open().
+   */
+  public LocatedBlock[] open(String src) throws IOException;
 
-    /**
-     * Create a new file.  Get back block and datanode info,
-     * which describes where the first block should be written.
-     *
-     * Successfully calling this method prevents any other 
-     * client from creating a file under the given name, but
-     * the caller must invoke complete() for the file to be
-     * added to the filesystem.
-     *
-     * Blocks have a maximum size.  Clients that intend to
-     * create multi-block files must also use reportWrittenBlock()
-     * and addBlock().
-     */
-    public LocatedBlock create( String src, 
-                                String clientName, 
-                                boolean overwrite, 
-                                short replication,
-                                long blockSize
+  /**
+   * Create a new file.  Get back block and datanode info,
+   * which describes where the first block should be written.
+   *
+   * Successfully calling this method prevents any other 
+   * client from creating a file under the given name, but
+   * the caller must invoke complete() for the file to be
+   * added to the filesystem.
+   *
+   * Blocks have a maximum size.  Clients that intend to
+   * create multi-block files must also use reportWrittenBlock()
+   * and addBlock().
+   */
+  public LocatedBlock create( String src, 
+                              String clientName, 
+                              boolean overwrite, 
+                              short replication,
+                              long blockSize
                               ) throws IOException;
 
-    /**
-     * Set replication for an existing file.
-     * 
-     * The NameNode sets replication to the new value and returns.
-     * The actual block replication is not expected to be performed during  
-     * this method call. The blocks will be populated or removed in the 
-     * background as the result of the routine block maintenance procedures.
-     * 
-     * @param src file name
-     * @param replication new replication
-     * @throws IOException
-     * @return true if successful;
-     *         false if file does not exist or is a directory
-     * @author shv
-     */
-    public boolean setReplication( String src, 
-                                short replication
-                              ) throws IOException;
+  /**
+   * Set replication for an existing file.
+   * 
+   * The NameNode sets replication to the new value and returns.
+   * The actual block replication is not expected to be performed during  
+   * this method call. The blocks will be populated or removed in the 
+   * background as the result of the routine block maintenance procedures.
+   * 
+   * @param src file name
+   * @param replication new replication
+   * @throws IOException
+   * @return true if successful;
+   *         false if file does not exist or is a directory
+   * @author shv
+   */
+  public boolean setReplication( String src, 
+                                 short replication
+                                 ) throws IOException;
 
-    /**
-     * If the client has not yet called reportWrittenBlock(), it can
-     * give up on it by calling abandonBlock().  The client can then
-     * either obtain a new block, or complete or abandon the file.
-     *
-     * Any partial writes to the block will be garbage-collected.
-     */
-    public void abandonBlock(Block b, String src) throws IOException;
+  /**
+   * If the client has not yet called reportWrittenBlock(), it can
+   * give up on it by calling abandonBlock().  The client can then
+   * either obtain a new block, or complete or abandon the file.
+   *
+   * Any partial writes to the block will be garbage-collected.
+   */
+  public void abandonBlock(Block b, String src) throws IOException;
 
-    /**
-     * A client that wants to write an additional block to the 
-     * indicated filename (which must currently be open for writing)
-     * should call addBlock().  
-     *
-     * addBlock() returns block and datanode info, just like the initial
-     * call to create().  
-     *
-     * A null response means the NameNode could not allocate a block,
-     * and that the caller should try again.
-     */
-    public LocatedBlock addBlock(String src, String clientName) throws IOException;
+  /**
+   * A client that wants to write an additional block to the 
+   * indicated filename (which must currently be open for writing)
+   * should call addBlock().  
+   *
+   * addBlock() returns block and datanode info, just like the initial
+   * call to create().  
+   *
+   * A null response means the NameNode could not allocate a block,
+   * and that the caller should try again.
+   */
+  public LocatedBlock addBlock(String src, String clientName) throws IOException;
 
-    /**
-     * A client that wants to abandon writing to the current file
-     * should call abandonFileInProgress().  After this call, any
-     * client can call create() to obtain the filename.
-     *
-     * Any blocks that have been written for the file will be 
-     * garbage-collected.
-     * @param src The filename
-     * @param holder The datanode holding the lease
-     */
-    public void abandonFileInProgress(String src, 
-                                      String holder) throws IOException;
+  /**
+   * A client that wants to abandon writing to the current file
+   * should call abandonFileInProgress().  After this call, any
+   * client can call create() to obtain the filename.
+   *
+   * Any blocks that have been written for the file will be 
+   * garbage-collected.
+   * @param src The filename
+   * @param holder The datanode holding the lease
+   */
+  public void abandonFileInProgress(String src, 
+                                    String holder) throws IOException;
 
-    /**
-     * The client is done writing data to the given filename, and would 
-     * like to complete it.  
-     *
-     * The function returns whether the file has been closed successfully.
-     * If the function returns false, the caller should try again.
-     *
-     * A call to complete() will not return true until all the file's
-     * blocks have been replicated the minimum number of times.  Thus,
-     * DataNode failures may cause a client to call complete() several
-     * times before succeeding.
-     */
-    public boolean complete(String src, String clientName) throws IOException;
+  /**
+   * The client is done writing data to the given filename, and would 
+   * like to complete it.  
+   *
+   * The function returns whether the file has been closed successfully.
+   * If the function returns false, the caller should try again.
+   *
+   * A call to complete() will not return true until all the file's
+   * blocks have been replicated the minimum number of times.  Thus,
+   * DataNode failures may cause a client to call complete() several
+   * times before succeeding.
+   */
+  public boolean complete(String src, String clientName) throws IOException;
 
-    /**
-     * The client wants to report corrupted blocks (blocks with specified
-     * locations on datanodes).
-     * @param blocks Array of located blocks to report
-     */
-    public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
+  /**
+   * The client wants to report corrupted blocks (blocks with specified
+   * locations on datanodes).
+   * @param blocks Array of located blocks to report
+   */
+  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException;
 
-    ///////////////////////////////////////
-    // Namespace management
-    ///////////////////////////////////////
-    /**
-     * Rename an item in the fs namespace
-     */
-    public boolean rename(String src, String dst) throws IOException;
+  ///////////////////////////////////////
+  // Namespace management
+  ///////////////////////////////////////
+  /**
+   * Rename an item in the fs namespace
+   */
+  public boolean rename(String src, String dst) throws IOException;
 
-    /**
-     * Remove the given filename from the filesystem
-     */
-    public boolean delete(String src) throws IOException;
+  /**
+   * Remove the given filename from the filesystem
+   */
+  public boolean delete(String src) throws IOException;
 
-    /**
-     * Check whether the given file exists
-     */
-    public boolean exists(String src) throws IOException;
+  /**
+   * Check whether the given file exists
+   */
+  public boolean exists(String src) throws IOException;
 
-    /**
-     * Check whether the given filename is a directory or not.
-     */
-    public boolean isDir(String src) throws IOException;
+  /**
+   * Check whether the given filename is a directory or not.
+   */
+  public boolean isDir(String src) throws IOException;
 
-    /**
-     * Create a directory (or hierarchy of directories) with the given
-     * name.
-     */
-    public boolean mkdirs(String src) throws IOException;
+  /**
+   * Create a directory (or hierarchy of directories) with the given
+   * name.
+   */
+  public boolean mkdirs(String src) throws IOException;
 
-    /**
-     * Get a listing of the indicated directory
-     */
-    public DFSFileInfo[] getListing(String src) throws IOException;
+  /**
+   * Get a listing of the indicated directory
+   */
+  public DFSFileInfo[] getListing(String src) throws IOException;
 
-    ///////////////////////////////////////
-    // System issues and management
-    ///////////////////////////////////////
-    /**
-     * getHints() returns a list of hostnames that store data for
-     * a specific file region.  It returns a set of hostnames for 
-     * every block within the indicated region.
-     *
-     * This function is very useful when writing code that considers
-     * data-placement when performing operations.  For example, the
-     * MapReduce system tries to schedule tasks on the same machines
-     * as the data-block the task processes. 
-     */
-    public String[][] getHints(String src, long start, long len) throws IOException;
-    /**
-     * obtainLock() is used for lock management.  It returns true if
-     * the lock has been seized correctly.  It returns false if the
-     * lock could not be obtained, and the client should try again.
-     *
-     * Locking is a part of most filesystems and is useful for a
-     * number of inter-process synchronization tasks.
-     */
-    /** @deprecated */ @Deprecated
+  ///////////////////////////////////////
+  // System issues and management
+  ///////////////////////////////////////
+  /**
+   * getHints() returns a list of hostnames that store data for
+   * a specific file region.  It returns a set of hostnames for 
+   * every block within the indicated region.
+   *
+   * This function is very useful when writing code that considers
+   * data-placement when performing operations.  For example, the
+   * MapReduce system tries to schedule tasks on the same machines
+   * as the data-block the task processes. 
+   */
+  public String[][] getHints(String src, long start, long len) throws IOException;
+  /**
+   * obtainLock() is used for lock management.  It returns true if
+   * the lock has been seized correctly.  It returns false if the
+   * lock could not be obtained, and the client should try again.
+   *
+   * Locking is a part of most filesystems and is useful for a
+   * number of inter-process synchronization tasks.
+   */
+  /** @deprecated */ @Deprecated
     public boolean obtainLock(String src, String clientName, boolean exclusive) throws IOException;
 
-    /**
-     * releaseLock() is called if the client would like to release
-     * a held lock.  It returns true if the lock is correctly released.
-     * It returns false if the client should wait and try again.
-     */
-    /** @deprecated */ @Deprecated
+  /**
+   * releaseLock() is called if the client would like to release
+   * a held lock.  It returns true if the lock is correctly released.
+   * It returns false if the client should wait and try again.
+   */
+  /** @deprecated */ @Deprecated
     public boolean releaseLock(String src, String clientName) throws IOException;
 
-    /**
-     * Client programs can cause stateful changes in the NameNode
-     * that affect other clients.  A client may obtain a file and 
-     * neither abandon nor complete it.  A client might hold a series
-     * of locks that prevent other clients from proceeding.
-     * Clearly, it would be bad if a client held a bunch of locks
-     * that it never gave up.  This can happen easily if the client
-     * dies unexpectedly.
-     *
-     * So, the NameNode will revoke the locks and live file-creates
-     * for clients that it thinks have died.  A client tells the
-     * NameNode that it is still alive by periodically calling
-     * renewLease().  If a certain amount of time passes since
-     * the last call to renewLease(), the NameNode assumes the
-     * client has died.
-     */
-    public void renewLease(String clientName) throws IOException;
+  /**
+   * Client programs can cause stateful changes in the NameNode
+   * that affect other clients.  A client may obtain a file and 
+   * neither abandon nor complete it.  A client might hold a series
+   * of locks that prevent other clients from proceeding.
+   * Clearly, it would be bad if a client held a bunch of locks
+   * that it never gave up.  This can happen easily if the client
+   * dies unexpectedly.
+   *
+   * So, the NameNode will revoke the locks and live file-creates
+   * for clients that it thinks have died.  A client tells the
+   * NameNode that it is still alive by periodically calling
+   * renewLease().  If a certain amount of time passes since
+   * the last call to renewLease(), the NameNode assumes the
+   * client has died.
+   */
+  public void renewLease(String clientName) throws IOException;
 
-    /**
-     * Get a set of statistics about the filesystem.
-     * Right now, only two values are returned.
-     * [0] contains the total storage capacity of the system,
-     *     in bytes.
-     * [1] contains the available storage of the system, in bytes.
-     */
-    public long[] getStats() throws IOException;
+  /**
+   * Get a set of statistics about the filesystem.
+   * Right now, only two values are returned.
+   * [0] contains the total storage capacity of the system,
+   *     in bytes.
+   * [1] contains the available storage of the system, in bytes.
+   */
+  public long[] getStats() throws IOException;
 
-    /**
-     * Get a full report on the system's current datanodes.
-     * One DatanodeInfo object is returned for each DataNode.
-     */
-    public DatanodeInfo[] getDatanodeReport() throws IOException;
+  /**
+   * Get a full report on the system's current datanodes.
+   * One DatanodeInfo object is returned for each DataNode.
+   */
+  public DatanodeInfo[] getDatanodeReport() throws IOException;
 
-    /**
-     * Get the block size for the given file.
-     * @param filename The name of the file
-     * @return The number of bytes in each block
-     * @throws IOException
-     */
-    public long getBlockSize(String filename) throws IOException;
+  /**
+   * Get the block size for the given file.
+   * @param filename The name of the file
+   * @return The number of bytes in each block
+   * @throws IOException
+   */
+  public long getBlockSize(String filename) throws IOException;
 
-    /**
-     * Enter, leave or get safe mode.
-     * <p>
-     * Safe mode is a name node state when it
-     * <ol><li>does not accept changes to name space (read-only), and</li>
-     * <li>does not replicate or delete blocks.</li></ol>
-     * 
-     * <p>
-     * Safe mode is entered automatically at name node startup.
-     * Safe mode can also be entered manually using
-     * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_ENTER )}.
-     * <p>
-     * At startup the name node accepts data node reports collecting
-     * information about block locations.
-     * In order to leave safe mode it needs to collect a configurable
-     * percentage called threshold of blocks, which satisfy the minimal 
-     * replication condition.
-     * The minimal replication condition is that each block must have at least
-     * <tt>dfs.replication.min</tt> replicas.
-     * When the threshold is reached the name node extends safe mode
-     * for a configurable amount of time
-     * to let the remaining data nodes check in before it
-     * will start replicating missing blocks.
-     * Then the name node leaves safe mode.
-     * <p>
-     * If safe mode is turned on manually using
-     * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_ENTER )}
-     * then the name node stays in safe mode until it is manually turned off
-     * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_LEAVE )}.
-     * Current state of the name node can be verified using
-     * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_GET )}
-     * <h4>Configuration parameters:</h4>
-     * <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
-     * <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
-     * <tt>dfs.replication.min</tt> is the minimal replication parameter.
-     * 
-     * <h4>Special cases:</h4>
-     * The name node does not enter safe mode at startup if the threshold is 
-     * set to 0 or if the name space is empty.<br>
-     * If the threshold is set to 1 then all blocks need to have at least 
-     * minimal replication.<br>
-     * If the threshold value is greater than 1 then the name node will not be 
-     * able to turn off safe mode automatically.<br>
-     * Safe mode can always be turned off manually.
-     * 
-     * @param action  <ul> <li>0 leave safe mode;</li>
-     *                <li>1 enter safe mode;</li>
-     *                <li>2 get safe mode state.</li></ul>
-     * @return <ul><li>0 if the safe mode is OFF or</li> 
-     *         <li>1 if the safe mode is ON.</li></ul>
-     * @throws IOException
-     * @author Konstantin Shvachko
-     */
-    public boolean setSafeMode( FSConstants.SafeModeAction action ) throws IOException;
+  /**
+   * Enter, leave or get safe mode.
+   * <p>
+   * Safe mode is a name node state when it
+   * <ol><li>does not accept changes to name space (read-only), and</li>
+   * <li>does not replicate or delete blocks.</li></ol>
+   * 
+   * <p>
+   * Safe mode is entered automatically at name node startup.
+   * Safe mode can also be entered manually using
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_ENTER )}.
+   * <p>
+   * At startup the name node accepts data node reports collecting
+   * information about block locations.
+   * In order to leave safe mode it needs to collect a configurable
+   * percentage called threshold of blocks, which satisfy the minimal 
+   * replication condition.
+   * The minimal replication condition is that each block must have at least
+   * <tt>dfs.replication.min</tt> replicas.
+   * When the threshold is reached the name node extends safe mode
+   * for a configurable amount of time
+   * to let the remaining data nodes check in before it
+   * will start replicating missing blocks.
+   * Then the name node leaves safe mode.
+   * <p>
+   * If safe mode is turned on manually using
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_ENTER )}
+   * then the name node stays in safe mode until it is manually turned off
+   * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_LEAVE )}.
+   * Current state of the name node can be verified using
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_GET )}
+   * <h4>Configuration parameters:</h4>
+   * <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
+   * <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
+   * <tt>dfs.replication.min</tt> is the minimal replication parameter.
+   * 
+   * <h4>Special cases:</h4>
+   * The name node does not enter safe mode at startup if the threshold is 
+   * set to 0 or if the name space is empty.<br>
+   * If the threshold is set to 1 then all blocks need to have at least 
+   * minimal replication.<br>
+   * If the threshold value is greater than 1 then the name node will not be 
+   * able to turn off safe mode automatically.<br>
+   * Safe mode can always be turned off manually.
+   * 
+   * @param action  <ul> <li>0 leave safe mode;</li>
+   *                <li>1 enter safe mode;</li>
+   *                <li>2 get safe mode state.</li></ul>
+   * @return <ul><li>0 if the safe mode is OFF or</li> 
+   *         <li>1 if the safe mode is ON.</li></ul>
+   * @throws IOException
+   * @author Konstantin Shvachko
+   */
+  public boolean setSafeMode( FSConstants.SafeModeAction action ) throws IOException;
 
-    /**
-     * Tells the namenode to reread the hosts and exclude files. 
-     * @return True if the call was successful, false otherwise.
-     * @throws IOException
-     */
-    public void refreshNodes() throws IOException;
+  /**
+   * Tells the namenode to reread the hosts and exclude files. 
+   * @return True if the call was successful, false otherwise.
+   * @throws IOException
+   */
+  public void refreshNodes() throws IOException;
 
 
-    /**
-     * Get the size of the current edit log (in bytes).
-     * @return The number of bytes in the current edit log.
-     * @throws IOException
-     */
-    public long getEditLogSize() throws IOException;
+  /**
+   * Get the size of the current edit log (in bytes).
+   * @return The number of bytes in the current edit log.
+   * @throws IOException
+   */
+  public long getEditLogSize() throws IOException;
 
-    /**
-     * Closes the current edit log and opens a new one. The 
-     * call fails if there are already two or more edits log files or
-     * if the file system is in SafeMode.
-     * @return True if the call was successful, false otherwise.
-     * @throws IOException
-     */
-    public void rollEditLog() throws IOException;
+  /**
+   * Closes the current edit log and opens a new one. The 
+   * call fails if there are already two or more edits log files or
+   * if the file system is in SafeMode.
+   * @return True if the call was successful, false otherwise.
+   * @throws IOException
+   */
+  public void rollEditLog() throws IOException;
 
-    /**
-     * Rolls the fsImage log. It removes the old fsImage, copies the
-     * new image to fsImage, removes the old edits and renames edits.new 
-     * to edits. The call fails if any of the four files are missing.
-     * @return True if the call was successful, false otherwise.
-     * @throws IOException
-     */
-    public void rollFsImage() throws IOException;
+  /**
+   * Rolls the fsImage log. It removes the old fsImage, copies the
+   * new image to fsImage, removes the old edits and renames edits.new 
+   * to edits. The call fails if any of the four files are missing.
+   * @return True if the call was successful, false otherwise.
+   * @throws IOException
+   */
+  public void rollFsImage() throws IOException;
 
-    /**
-     * Finalize previous upgrade.
-     * Remove file system state saved during the upgrade.
-     * The upgrade will become irreversible.
-     * 
-     * @throws IOException
-     */
-    public void finalizeUpgrade() throws IOException;
+  /**
+   * Finalize previous upgrade.
+   * Remove file system state saved during the upgrade.
+   * The upgrade will become irreversible.
+   * 
+   * @throws IOException
+   */
+  public void finalizeUpgrade() throws IOException;
 
-   /**
-    * Dumps namenode data structures into specified file. If file
-    * already exists, then append.
-    * @throws IOException
-    */
-    public void metaSave(String filename) throws IOException;
+  /**
+   * Dumps namenode data structures into specified file. If file
+   * already exists, then append.
+   * @throws IOException
+   */
+  public void metaSave(String filename) throws IOException;
 }
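The javadoc above spells out the write protocol a client is expected to follow: create() reserves the name and returns the first block, addBlock() hands out further blocks (a null reply means "try again"), and complete() must be retried until it returns true. Below is a minimal sketch of that call order, written against the interface exactly as shown; it lives in the org.apache.hadoop.dfs package because ClientProtocol is package-private, and the actual streaming of block contents to the returned datanodes is omitted (DFSClient handles that part).

package org.apache.hadoop.dfs;

import java.io.IOException;

/**
 * Illustrative only: the call sequence described in the ClientProtocol
 * javadoc for writing a file. The helper name and parameters are
 * hypothetical; real applications go through DistributedFileSystem.
 */
class ClientProtocolWriteSketch {

  static void writeFile(ClientProtocol namenode, String src, String clientName,
                        long fileLength, long blockSize) throws IOException {
    // create() reserves the name and returns where the first block goes.
    LocatedBlock first = namenode.create(src, clientName,
                                         false /* overwrite */,
                                         (short) 3 /* replication */,
                                         blockSize);
    // ... stream up to blockSize bytes to the datanodes returned for 'first' ...

    // Ask for additional blocks until the file length is covered.
    long written = Math.min(fileLength, blockSize);
    while (written < fileLength) {
      LocatedBlock next = namenode.addBlock(src, clientName);
      if (next == null) {
        continue;                      // namenode asked us to retry
      }
      // ... stream the next chunk of data to the returned datanodes ...
      written += Math.min(blockSize, fileLength - written);
    }

    // complete() may return false until the minimum replication is met,
    // so the client keeps retrying, as the javadoc requires.
    while (!namenode.complete(src, clientName)) {
      try {
        Thread.sleep(400);
      } catch (InterruptedException ignored) {
      }
    }
  }
}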

+ 321 - 321
src/java/org/apache/hadoop/dfs/DFSAdmin.java

@@ -31,355 +31,355 @@ import org.apache.hadoop.ipc.RPC;
  */
 public class DFSAdmin extends FsShell {
 
-    /**
-     * Construct a DFSAdmin object.
-     */
-    public DFSAdmin() {
-        super();
-    }
-
-    /**
-     * Gives a report on how the FileSystem is doing.
-     * @exception IOException if the filesystem does not exist.
-     */
-    public void report() throws IOException {
-      if (fs instanceof DistributedFileSystem) {
-        DistributedFileSystem dfs = (DistributedFileSystem) fs;
-        long raw = dfs.getRawCapacity();
-        long rawUsed = dfs.getRawUsed();
-        long used = dfs.getUsed();
-        boolean mode = dfs.setSafeMode(
-                           FSConstants.SafeModeAction.SAFEMODE_GET);
-
-        if (mode) {
-          System.out.println("Safe mode is ON");
-        }
-        System.out.println("Total raw bytes: " + raw
-                           + " (" + byteDesc(raw) + ")");
-        System.out.println("Used raw bytes: " + rawUsed
-                           + " (" + byteDesc(rawUsed) + ")");
-        System.out.println("% used: "
-                           + limitDecimal(((1.0 * rawUsed) / raw) * 100, 2)
-                           + "%");
-        System.out.println();
-        System.out.println("Total effective bytes: " + used
-                           + " (" + byteDesc(used) + ")");
-        System.out.println("Effective replication multiplier: "
-                           + (1.0 * rawUsed / used));
-
-        System.out.println("-------------------------------------------------");
-        DatanodeInfo[] info = dfs.getDataNodeStats();
-        System.out.println("Datanodes available: " + info.length);
+  /**
+   * Construct a DFSAdmin object.
+   */
+  public DFSAdmin() {
+    super();
+  }
+
+  /**
+   * Gives a report on how the FileSystem is doing.
+   * @exception IOException if the filesystem does not exist.
+   */
+  public void report() throws IOException {
+    if (fs instanceof DistributedFileSystem) {
+      DistributedFileSystem dfs = (DistributedFileSystem) fs;
+      long raw = dfs.getRawCapacity();
+      long rawUsed = dfs.getRawUsed();
+      long used = dfs.getUsed();
+      boolean mode = dfs.setSafeMode(
+                                     FSConstants.SafeModeAction.SAFEMODE_GET);
+
+      if (mode) {
+        System.out.println("Safe mode is ON");
+      }
+      System.out.println("Total raw bytes: " + raw
+                         + " (" + byteDesc(raw) + ")");
+      System.out.println("Used raw bytes: " + rawUsed
+                         + " (" + byteDesc(rawUsed) + ")");
+      System.out.println("% used: "
+                         + limitDecimal(((1.0 * rawUsed) / raw) * 100, 2)
+                         + "%");
+      System.out.println();
+      System.out.println("Total effective bytes: " + used
+                         + " (" + byteDesc(used) + ")");
+      System.out.println("Effective replication multiplier: "
+                         + (1.0 * rawUsed / used));
+
+      System.out.println("-------------------------------------------------");
+      DatanodeInfo[] info = dfs.getDataNodeStats();
+      System.out.println("Datanodes available: " + info.length);
+      System.out.println();
+      for (int i = 0; i < info.length; i++) {
+        System.out.println(info[i].getDatanodeReport());
         System.out.println();
-        for (int i = 0; i < info.length; i++) {
-          System.out.println(info[i].getDatanodeReport());
-          System.out.println();
-        }
       }
     }
-
-    /**
-     * Safe mode maintenance command.
-     * Usage: java DFSAdmin -safemode [enter | leave | get]
-     * @param argv List of command line parameters.
-     * @param idx The index of the command that is being processed.
-     * @exception IOException if the filesystem does not exist.
-     */
-    public void setSafeMode(String[] argv, int idx) throws IOException {
-      if (!(fs instanceof DistributedFileSystem)) {
-        System.err.println("FileSystem is " + fs.getName());
-        return;
-      }
-      if (idx != argv.length - 1) {
-        printUsage("-safemode");
-        return;
-      }
-      FSConstants.SafeModeAction action;
-      Boolean waitExitSafe = false;
-
-      if ("leave".equalsIgnoreCase(argv[idx])) {
-        action = FSConstants.SafeModeAction.SAFEMODE_LEAVE;
-      } else if ("enter".equalsIgnoreCase(argv[idx])) {
-        action = FSConstants.SafeModeAction.SAFEMODE_ENTER;
-      } else if ("get".equalsIgnoreCase(argv[idx])) {
-        action = FSConstants.SafeModeAction.SAFEMODE_GET;
-      } else if ("wait".equalsIgnoreCase(argv[idx])) {
-        action = FSConstants.SafeModeAction.SAFEMODE_GET;
-        waitExitSafe = true;
-      } else {
-        printUsage("-safemode");
-        return;
-      }
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      boolean inSafeMode = dfs.setSafeMode(action);
-
-      //
-      // If we are waiting for safemode to exit, then poll and
-      // sleep till we are out of safemode.
-      //
-      if (waitExitSafe) {
-        while (inSafeMode) {
-          try {
-            Thread.sleep(5000);
-          } catch (java.lang.InterruptedException e) {
-            throw new IOException("Wait Interrupted");
-          }
-          inSafeMode = dfs.setSafeMode(action);
+  }
+
+  /**
+   * Safe mode maintenance command.
+   * Usage: java DFSAdmin -safemode [enter | leave | get]
+   * @param argv List of command line parameters.
+   * @param idx The index of the command that is being processed.
+   * @exception IOException if the filesystem does not exist.
+   */
+  public void setSafeMode(String[] argv, int idx) throws IOException {
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("FileSystem is " + fs.getName());
+      return;
+    }
+    if (idx != argv.length - 1) {
+      printUsage("-safemode");
+      return;
+    }
+    FSConstants.SafeModeAction action;
+    Boolean waitExitSafe = false;
+
+    if ("leave".equalsIgnoreCase(argv[idx])) {
+      action = FSConstants.SafeModeAction.SAFEMODE_LEAVE;
+    } else if ("enter".equalsIgnoreCase(argv[idx])) {
+      action = FSConstants.SafeModeAction.SAFEMODE_ENTER;
+    } else if ("get".equalsIgnoreCase(argv[idx])) {
+      action = FSConstants.SafeModeAction.SAFEMODE_GET;
+    } else if ("wait".equalsIgnoreCase(argv[idx])) {
+      action = FSConstants.SafeModeAction.SAFEMODE_GET;
+      waitExitSafe = true;
+    } else {
+      printUsage("-safemode");
+      return;
+    }
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    boolean inSafeMode = dfs.setSafeMode(action);
+
+    //
+    // If we are waiting for safemode to exit, then poll and
+    // sleep till we are out of safemode.
+    //
+    if (waitExitSafe) {
+      while (inSafeMode) {
+        try {
+          Thread.sleep(5000);
+        } catch (java.lang.InterruptedException e) {
+          throw new IOException("Wait Interrupted");
         }
+        inSafeMode = dfs.setSafeMode(action);
       }
-
-      System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
     }
 
-    /**
-     * Command to ask the namenode to reread the hosts and excluded hosts 
-     * file.
-     * Usage: java DFSAdmin -refreshNodes
-     * @exception IOException 
-     */
-    public int refreshNodes() throws IOException {
-      int exitCode = -1;
-
-      if (!(fs instanceof DistributedFileSystem)) {
-        System.err.println("FileSystem is " + fs.getName());
-        return exitCode;
-      }
+    System.out.println("Safe mode is " + (inSafeMode ? "ON" : "OFF"));
+  }
 
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      dfs.refreshNodes();
-      exitCode = 0;
-   
+  /**
+   * Command to ask the namenode to reread the hosts and excluded hosts 
+   * file.
+   * Usage: java DFSAdmin -refreshNodes
+   * @exception IOException 
+   */
+  public int refreshNodes() throws IOException {
+    int exitCode = -1;
+
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.err.println("FileSystem is " + fs.getName());
       return exitCode;
     }
 
-    private void printHelp(String cmd) {
-        String summary = "hadoop dfsadmin is the command to execute dfs administrative commands.\n" +
-            "The full syntax is: \n\n" +
-            "hadoop dfsadmin [-report] [-safemode <enter | leave | get | wait>]\n" +
-            "\t[-refreshNodes] [-help [cmd]]\n";
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    dfs.refreshNodes();
+    exitCode = 0;
+   
+    return exitCode;
+  }
 
-        String report ="-report: \tReports basic filesystem information and statistics.\n";
-        
-        String safemode = "-safemode <enter|leave|get|wait>:  Safemode maintenance command.\n" + 
-            "\t\tSafe mode is a name node state when it\n" +
-            "\t\t\t1.  does not accept changes to name space (read-only)\n" +
-            "\t\t\t2.  does not replicate or delete blocks.\n" +
-            "\t\tSafe mode is entered automatically at name node startup, and\n" +
-            "\t\tleaves safe mode automatically when the configured minimum\n" +
-            "\t\tpercentage of blocks satisfies the minimal replication\n" +
-            "\t\tcondition.  Safe mode can also be entered manually, but then\n" +
-            "\t\tcan only be turned off manually as well.\n";
-
-        String refreshNodes = "-refreshNodes: \tReread the hosts and exclude files to update the set\n" +
-            "\t\tof datanodes that are allowed to connect to the namenode\n" +
-            "\t\tand those that should be decommissioned/recommissioned.\n";
-
-        String help = "-help [cmd]: \tDisplays help for given command or all commands if none\n" +
-            "\t\tis specified.\n";
-
-        if ("report".equals(cmd)) {
-            System.out.println(report);
-        } else if ("safemode".equals(cmd)) {
-            System.out.println(safemode);
-        } else if ("refreshNodes".equals(cmd)) {
-            System.out.println(refreshNodes);
-        } else if ("help".equals(cmd)) {
-            System.out.println(help);
-        } else {
-            System.out.println(summary);
-            System.out.println(report);
-            System.out.println(safemode);
-            System.out.println(refreshNodes);
-            System.out.println(help);
-        }
+  private void printHelp(String cmd) {
+    String summary = "hadoop dfsadmin is the command to execute dfs administrative commands.\n" +
+      "The full syntax is: \n\n" +
+      "hadoop dfsadmin [-report] [-safemode <enter | leave | get | wait>]\n" +
+      "\t[-refreshNodes] [-help [cmd]]\n";
 
+    String report ="-report: \tReports basic filesystem information and statistics.\n";
+        
+    String safemode = "-safemode <enter|leave|get|wait>:  Safemode maintenance command.\n" + 
+      "\t\tSafe mode is a name node state when it\n" +
+      "\t\t\t1.  does not accept changes to name space (read-only)\n" +
+      "\t\t\t2.  does not replicate or delete blocks.\n" +
+      "\t\tSafe mode is entered automatically at name node startup, and\n" +
+      "\t\tleaves safe mode automatically when the configured minimum\n" +
+      "\t\tpercentage of blocks satisfies the minimal replication\n" +
+      "\t\tcondition.  Safe mode can also be entered manually, but then\n" +
+      "\t\tcan only be turned off manually as well.\n";
+
+    String refreshNodes = "-refreshNodes: \tReread the hosts and exclude files to update the set\n" +
+      "\t\tof datanodes that are allowed to connect to the namenode\n" +
+      "\t\tand those that should be decommissioned/recommissioned.\n";
+
+    String help = "-help [cmd]: \tDisplays help for given command or all commands if none\n" +
+      "\t\tis specified.\n";
+
+    if ("report".equals(cmd)) {
+      System.out.println(report);
+    } else if ("safemode".equals(cmd)) {
+      System.out.println(safemode);
+    } else if ("refreshNodes".equals(cmd)) {
+      System.out.println(refreshNodes);
+    } else if ("help".equals(cmd)) {
+      System.out.println(help);
+    } else {
+      System.out.println(summary);
+      System.out.println(report);
+      System.out.println(safemode);
+      System.out.println(refreshNodes);
+      System.out.println(help);
     }
 
+  }
 
-    /**
-     * Command to ask the namenode to finalize previously performed upgrade.
-     * Usage: java DFSAdmin -finalizeUpgrade
-     * @exception IOException 
-     */
-    public int finalizeUpgrade() throws IOException {
-      int exitCode = -1;
 
-      if (!(fs instanceof DistributedFileSystem)) {
-        System.out.println("FileSystem is " + fs.getUri());
-        return exitCode;
-      }
+  /**
+   * Command to ask the namenode to finalize previously performed upgrade.
+   * Usage: java DFSAdmin -finalizeUpgrade
+   * @exception IOException 
+   */
+  public int finalizeUpgrade() throws IOException {
+    int exitCode = -1;
 
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      dfs.finalizeUpgrade();
-      exitCode = 0;
-   
+    if (!(fs instanceof DistributedFileSystem)) {
+      System.out.println("FileSystem is " + fs.getUri());
       return exitCode;
     }
 
-    /**
-     * Dumps DFS data structures into specified file.
-     * Usage: java DFSAdmin -metasave filename
-     * @param argv List of command line parameters.
-     * @param idx The index of the command that is being processed.
-     * @exception IOException if an error occurred while accessing
-     *            the file or path.
-     */
-    public int metaSave(String[] argv, int idx) throws IOException {
-      String pathname = argv[idx];
-      DistributedFileSystem dfs = (DistributedFileSystem) fs;
-      dfs.metaSave(pathname);
-      System.out.println("Created file " + pathname + " on server " +
-                          dfs.getUri());
-      return 0;
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    dfs.finalizeUpgrade();
+    exitCode = 0;
+   
+    return exitCode;
+  }
+
+  /**
+   * Dumps DFS data structures into specified file.
+   * Usage: java DFSAdmin -metasave filename
+   * @param argv List of command line parameters.
+   * @param idx The index of the command that is being processed.
+   * @exception IOException if an error occurred while accessing
+   *            the file or path.
+   */
+  public int metaSave(String[] argv, int idx) throws IOException {
+    String pathname = argv[idx];
+    DistributedFileSystem dfs = (DistributedFileSystem) fs;
+    dfs.metaSave(pathname);
+    System.out.println("Created file " + pathname + " on server " +
+                       dfs.getUri());
+    return 0;
+  }
+
+  /**
+   * Displays format of commands.
+   * @param cmd The command that is being executed.
+   */
+  public void printUsage(String cmd) {
+    if ("-report".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-report]");
+    } else if ("-safemode".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-safemode enter | leave | get | wait]");
+    } else if ("-refreshNodes".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-refreshNodes]");
+    } else if ("-finalizeUpgrade".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-finalizeUpgrade]");
+    } else if ("-metasave".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+                         + " [-metasave filename]");
+    } else {
+      System.err.println("Usage: java DFSAdmin");
+      System.err.println("           [-report]");
+      System.err.println("           [-safemode enter | leave | get | wait]");
+      System.err.println("           [-refreshNodes]");
+      System.err.println("           [-finalizeUpgrade]");
+      System.err.println("           [-metasave filename]");
+      System.err.println("           [-help [cmd]]");
     }
-
-    /**
-     * Displays format of commands.
-     * @param cmd The command that is being executed.
-     */
-    public void printUsage(String cmd) {
-          if ("-report".equals(cmd)) {
-            System.err.println("Usage: java DFSAdmin"
-                + " [-report]");
-          } else if ("-safemode".equals(cmd)) {
-            System.err.println("Usage: java DFSAdmin"
-                + " [-safemode enter | leave | get | wait]");
-          } else if ("-refreshNodes".equals(cmd)) {
-            System.err.println("Usage: java DFSAdmin"
-                + " [-refreshNodes]");
-          } else if ("-finalizeUpgrade".equals(cmd)) {
-            System.err.println("Usage: java DFSAdmin"
-                + " [-finalizeUpgrade]");
-           } else if ("-metasave".equals(cmd)) {
-             System.err.println("Usage: java DFSAdmin"
-                 + " [-metasave filename]");
-          } else {
-            System.err.println("Usage: java DFSAdmin");
-            System.err.println("           [-report]");
-            System.err.println("           [-safemode enter | leave | get | wait]");
-            System.err.println("           [-refreshNodes]");
-            System.err.println("           [-finalizeUpgrade]");
-            System.err.println("           [-metasave filename]");
-            System.err.println("           [-help [cmd]]");
-          }
+  }
+
+  /**
+   * @param argv The parameters passed to this program.
+   * @exception Exception if the filesystem does not exist.
+   * @return 0 on success, non zero on error.
+   */
+  public int run(String[] argv) throws Exception {
+
+    if (argv.length < 1) {
+      printUsage("");
+      return -1;
     }
 
-    /**
-     * @param argv The parameters passed to this program.
-     * @exception Exception if the filesystem does not exist.
-     * @return 0 on success, non zero on error.
-     */
-    public int run(String[] argv) throws Exception {
-
-        if (argv.length < 1) {
-            printUsage("");
-            return -1;
-        }
-
-        int exitCode = -1;
-        int i = 0;
-        String cmd = argv[i++];
-
-        //
-        // verify that we have enough command line parameters
-        //
-        if ("-safemode".equals(cmd)) {
-                if (argv.length != 2) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-report".equals(cmd)) {
-                if (argv.length != 1) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-refreshNodes".equals(cmd)) {
-                if (argv.length != 1) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-finalizeUpgrade".equals(cmd)) {
-                if (argv.length != 1) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-metasave".equals(cmd)) {
-                if (argv.length != 2) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        }
+    int exitCode = -1;
+    int i = 0;
+    String cmd = argv[i++];
 
+    //
+    // verify that we have enough command line parameters
+    //
+    if ("-safemode".equals(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-report".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-refreshNodes".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-finalizeUpgrade".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-metasave".equals(cmd)) {
+      if (argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    }
 
-        // initialize DFSAdmin
-        try {
-            init();
-        } catch (RPC.VersionMismatch v) {
-            System.err.println("Version Mismatch between client and server"
-                               + "... command aborted.");
-            return exitCode;
-        } catch (IOException e) {
-            System.err.println("Bad connection to DFS... command aborted.");
-            return exitCode;
-        }
 
-        exitCode = 0;
-        try {
-            if ("-report".equals(cmd)) {
-                report();
-            } else if ("-safemode".equals(cmd)) {
-                setSafeMode(argv, i);
-            } else if ("-refreshNodes".equals(cmd)) {
-                exitCode = refreshNodes();
-            } else if ("-finalizeUpgrade".equals(cmd)) {
-                exitCode = finalizeUpgrade();
-            } else if ("-metasave".equals(cmd)) {
-                 exitCode = metaSave(argv, i);
-            } else if ("-help".equals(cmd)) {
-                if (i < argv.length) {
-                    printHelp(argv[i]);
-                } else {
-                    printHelp("");
-                }
-            } else {
-                exitCode = -1;
-                System.err.println(cmd.substring(1) + ": Unknown command");
-                printUsage("");
-            }
-        } catch (RemoteException e) {
-          //
-          // This is an error returned by the hadoop server. Print
-          // out the first line of the error message, ignore the stack trace.
-          exitCode = -1;
-          try {
-            String[] content;
-            content = e.getLocalizedMessage().split("\n");
-            System.err.println(cmd.substring(1) + ": "
-                               + content[0]);
-          } catch (Exception ex) {
-            System.err.println(cmd.substring(1) + ": "
-                               + ex.getLocalizedMessage());
-          }
-        } catch (IOException e) {
-          //
-          // IO exception encountered locally.
-          //
-          exitCode = -1;
-          System.err.println(cmd.substring(1) + ": "
-                             + e.getLocalizedMessage());
-        } finally {
-            fs.close();
-        }
-        return exitCode;
+    // initialize DFSAdmin
+    try {
+      init();
+    } catch (RPC.VersionMismatch v) {
+      System.err.println("Version Mismatch between client and server"
+                         + "... command aborted.");
+      return exitCode;
+    } catch (IOException e) {
+      System.err.println("Bad connection to DFS... command aborted.");
+      return exitCode;
     }
 
-    /**
-     * main() has some simple utility methods.
-     * @param argv Command line parameters.
-     * @exception Exception if the filesystem does not exist.
-     */
-    public static void main(String[] argv) throws Exception {
-        int res = new DFSAdmin().doMain(new Configuration(), argv);
-        System.exit(res);
+    exitCode = 0;
+    try {
+      if ("-report".equals(cmd)) {
+        report();
+      } else if ("-safemode".equals(cmd)) {
+        setSafeMode(argv, i);
+      } else if ("-refreshNodes".equals(cmd)) {
+        exitCode = refreshNodes();
+      } else if ("-finalizeUpgrade".equals(cmd)) {
+        exitCode = finalizeUpgrade();
+      } else if ("-metasave".equals(cmd)) {
+        exitCode = metaSave(argv, i);
+      } else if ("-help".equals(cmd)) {
+        if (i < argv.length) {
+          printHelp(argv[i]);
+        } else {
+          printHelp("");
+        }
+      } else {
+        exitCode = -1;
+        System.err.println(cmd.substring(1) + ": Unknown command");
+        printUsage("");
+      }
+    } catch (RemoteException e) {
+      //
+      // This is an error returned by the hadoop server. Print
+      // out the first line of the error message, ignore the stack trace.
+      exitCode = -1;
+      try {
+        String[] content;
+        content = e.getLocalizedMessage().split("\n");
+        System.err.println(cmd.substring(1) + ": "
+                           + content[0]);
+      } catch (Exception ex) {
+        System.err.println(cmd.substring(1) + ": "
+                           + ex.getLocalizedMessage());
+      }
+    } catch (IOException e) {
+      //
+      // IO exception encountered locally.
+      //
+      exitCode = -1;
+      System.err.println(cmd.substring(1) + ": "
+                         + e.getLocalizedMessage());
+    } finally {
+      fs.close();
     }
+    return exitCode;
+  }
+
+  /**
+   * main() has some simple utility methods.
+   * @param argv Command line parameters.
+   * @exception Exception if the filesystem does not exist.
+   */
+  public static void main(String[] argv) throws Exception {
+    int res = new DFSAdmin().doMain(new Configuration(), argv);
+    System.exit(res);
+  }
 }
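DFSAdmin.run() above dispatches on the first command-line argument, and main() simply delegates to doMain(). The same entry point can be reused to block until the namenode leaves safe mode, which is what the "-safemode wait" branch of setSafeMode() implements. A short sketch, assuming FsShell's doMain() is accessible to callers the same way DFSAdmin.main() uses it; the class name is hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.dfs.DFSAdmin;

/**
 * Illustrative only: drive DFSAdmin programmatically to wait for the
 * namenode to leave safe mode before doing other work.
 */
public class WaitForSafeModeExit {
  public static void main(String[] args) throws Exception {
    // Equivalent to running: hadoop dfsadmin -safemode wait
    int rc = new DFSAdmin().doMain(new Configuration(),
                                   new String[] { "-safemode", "wait" });
    if (rc != 0) {
      System.err.println("dfsadmin returned " + rc);
      System.exit(rc);
    }
    // ... safe to proceed: the namenode is out of safe mode ...
  }
}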

+ 1236 - 1236
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -42,1400 +42,1400 @@ import java.util.*;
  * @author Mike Cafarella, Tessa MacDuff
  ********************************************************/
 class DFSClient implements FSConstants {
-    public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.DFSClient");
-    static int MAX_BLOCK_ACQUIRE_FAILURES = 3;
-    private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
-    private static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
-    ClientProtocol namenode;
-    boolean running = true;
-    Random r = new Random();
-    String clientName;
-    Daemon leaseChecker;
-    private Configuration conf;
-    private long defaultBlockSize;
-    private short defaultReplication;
+  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.DFSClient");
+  static int MAX_BLOCK_ACQUIRE_FAILURES = 3;
+  private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
+  private static final long DEFAULT_BLOCK_SIZE = 64 * 1024 * 1024;
+  ClientProtocol namenode;
+  boolean running = true;
+  Random r = new Random();
+  String clientName;
+  Daemon leaseChecker;
+  private Configuration conf;
+  private long defaultBlockSize;
+  private short defaultReplication;
     
-    /**
-     * A map from name -> DFSOutputStream of files that are currently being
-     * written by this client.
-     */
-    private TreeMap pendingCreates = new TreeMap();
+  /**
+   * A map from name -> DFSOutputStream of files that are currently being
+   * written by this client.
+   */
+  private TreeMap pendingCreates = new TreeMap();
     
-    /**
-     * A class to track the list of DFS clients, so that they can be closed
-     * on exit.
-     * @author Owen O'Malley
-     */
-    private static class ClientFinalizer extends Thread {
-      private List clients = new ArrayList();
-
-      public synchronized void addClient(DFSClient client) {
-        clients.add(client);
-      }
+  /**
+   * A class to track the list of DFS clients, so that they can be closed
+   * on exit.
+   * @author Owen O'Malley
+   */
+  private static class ClientFinalizer extends Thread {
+    private List clients = new ArrayList();
+
+    public synchronized void addClient(DFSClient client) {
+      clients.add(client);
+    }
 
-      public synchronized void run() {
-        Iterator itr = clients.iterator();
-        while (itr.hasNext()) {
-          DFSClient client = (DFSClient) itr.next();
-          if (client.running) {
-            try {
-              client.close();
-            } catch (IOException ie) {
-              System.err.println("Error closing client");
-              ie.printStackTrace();
-            }
+    public synchronized void run() {
+      Iterator itr = clients.iterator();
+      while (itr.hasNext()) {
+        DFSClient client = (DFSClient) itr.next();
+        if (client.running) {
+          try {
+            client.close();
+          } catch (IOException ie) {
+            System.err.println("Error closing client");
+            ie.printStackTrace();
           }
         }
       }
     }
+  }
 
-    // add a cleanup thread
-    private static ClientFinalizer clientFinalizer = new ClientFinalizer();
-    static {
-      Runtime.getRuntime().addShutdownHook(clientFinalizer);
-    }
+  // add a cleanup thread
+  private static ClientFinalizer clientFinalizer = new ClientFinalizer();
+  static {
+    Runtime.getRuntime().addShutdownHook(clientFinalizer);
+  }
 
         
-    /** 
-     * Create a new DFSClient connected to the given namenode server.
-     */
-    public DFSClient(InetSocketAddress nameNodeAddr, Configuration conf)
+  /** 
+   * Create a new DFSClient connected to the given namenode server.
+   */
+  public DFSClient(InetSocketAddress nameNodeAddr, Configuration conf)
     throws IOException {
-        this.conf = conf;
-        this.namenode = (ClientProtocol) RPC.getProxy(ClientProtocol.class,
-            ClientProtocol.versionID, nameNodeAddr, conf);
-        String taskId = conf.get("mapred.task.id");
-        if (taskId != null) {
-            this.clientName = "DFSClient_" + taskId; 
-        } else {
-            this.clientName = "DFSClient_" + r.nextInt();
-        }
-        defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
-        defaultReplication = (short) conf.getInt("dfs.replication", 3);
-        this.leaseChecker = new Daemon(new LeaseChecker());
-        this.leaseChecker.start();
+    this.conf = conf;
+    this.namenode = (ClientProtocol) RPC.getProxy(ClientProtocol.class,
+                                                  ClientProtocol.versionID, nameNodeAddr, conf);
+    String taskId = conf.get("mapred.task.id");
+    if (taskId != null) {
+      this.clientName = "DFSClient_" + taskId; 
+    } else {
+      this.clientName = "DFSClient_" + r.nextInt();
     }
-
-    private void checkOpen() throws IOException {
-      if (!running) {
-        IOException result = new IOException("Filesystem closed");
-        throw result;
-      }
+    defaultBlockSize = conf.getLong("dfs.block.size", DEFAULT_BLOCK_SIZE);
+    defaultReplication = (short) conf.getInt("dfs.replication", 3);
+    this.leaseChecker = new Daemon(new LeaseChecker());
+    this.leaseChecker.start();
+  }
+
+  private void checkOpen() throws IOException {
+    if (!running) {
+      IOException result = new IOException("Filesystem closed");
+      throw result;
     }
+  }
     
-    /**
-     * Close the file system, abandoning all of the leases and files being
-     * created.
-     */
-    public void close() throws IOException {
-      // synchronize in here so that we don't need to change the API
-      synchronized (this) {
-        checkOpen();
-        synchronized (pendingCreates) {
-          Iterator file_itr = pendingCreates.keySet().iterator();
-          while (file_itr.hasNext()) {
-            String name = (String) file_itr.next();
-            try {
-              namenode.abandonFileInProgress(name, clientName);
-            } catch (IOException ie) {
-              System.err.println("Exception abandoning create lock on " + name);
-              ie.printStackTrace();
-            }
+  /**
+   * Close the file system, abandoning all of the leases and files being
+   * created.
+   */
+  public void close() throws IOException {
+    // synchronize in here so that we don't need to change the API
+    synchronized (this) {
+      checkOpen();
+      synchronized (pendingCreates) {
+        Iterator file_itr = pendingCreates.keySet().iterator();
+        while (file_itr.hasNext()) {
+          String name = (String) file_itr.next();
+          try {
+            namenode.abandonFileInProgress(name, clientName);
+          } catch (IOException ie) {
+            System.err.println("Exception abandoning create lock on " + name);
+            ie.printStackTrace();
           }
-          pendingCreates.clear();
         }
-        this.running = false;
-        try {
-            leaseChecker.join();
-        } catch (InterruptedException ie) {
+        pendingCreates.clear();
+      }
+      this.running = false;
+      try {
+        leaseChecker.join();
+      } catch (InterruptedException ie) {
+      }
+    }
+  }
+
+  /**
+   * Get the default block size for this cluster
+   * @return the default block size in bytes
+   */
+  public long getDefaultBlockSize() {
+    return defaultBlockSize;
+  }
+    
+  public long getBlockSize(UTF8 f) throws IOException {
+    int retries = 4;
+    while (true) {
+      try {
+        return namenode.getBlockSize(f.toString());
+      } catch (IOException ie) {
+        if (--retries == 0) {
+          LOG.warn("Problem getting block size: " + 
+                   StringUtils.stringifyException(ie));
+          throw ie;
         }
+        LOG.debug("Problem getting block size: " + 
+                  StringUtils.stringifyException(ie));
       }
     }
-
-    /**
-     * Get the default block size for this cluster
-     * @return the default block size in bytes
-     */
-    public long getDefaultBlockSize() {
-      return defaultBlockSize;
+  }
+
+  /**
+   * Report corrupt blocks that were discovered by the client.
+   */
+  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+    namenode.reportBadBlocks(blocks);
+  }
+  
+  public short getDefaultReplication() {
+    return defaultReplication;
+  }
+    
+  /**
+   * Get hints about the location of the indicated block(s).  The
+   * array returned is as long as there are blocks in the indicated
+   * range.  Each block may have one or more locations.
+   */
+  public String[][] getHints(UTF8 src, long start, long len) throws IOException {
+    return namenode.getHints(src.toString(), start, len);
+  }
+
+  /**
+   * Create an input stream that obtains a nodelist from the
+   * namenode, and then reads from all the right places.  Creates
+   * inner subclass of InputStream that does the right out-of-band
+   * work.
+   */
+  public DFSInputStream open(UTF8 src) throws IOException {
+    checkOpen();
+    //    Get block info from namenode
+    return new DFSInputStream(src.toString());
+  }
+
+  /**
+   * Create a new dfs file and return an output stream for writing into it. 
+   * 
+   * @param src stream name
+   * @param overwrite do not check for file existence if true
+   * @return output stream
+   * @throws IOException
+   */
+  public OutputStream create( UTF8 src, 
+                              boolean overwrite
+                              ) throws IOException {
+    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  }
+    
+  /**
+   * Create a new dfs file and return an output stream for writing into it
+   * with write-progress reporting. 
+   * 
+   * @param src stream name
+   * @param overwrite do not check for file existence if true
+   * @return output stream
+   * @throws IOException
+   */
+  public OutputStream create( UTF8 src, 
+                              boolean overwrite,
+                              Progressable progress
+                              ) throws IOException {
+    return create( src, overwrite, defaultReplication, defaultBlockSize, progress);
+  }
+    
+  /**
+   * Create a new dfs file with the specified block replication 
+   * and return an output stream for writing into the file.  
+   * 
+   * @param src stream name
+   * @param overwrite do not check for file existence if true
+   * @param replication block replication
+   * @return output stream
+   * @throws IOException
+   */
+  public OutputStream create( UTF8 src, 
+                              boolean overwrite, 
+                              short replication,
+                              long blockSize
+                              ) throws IOException {
+    return create(src, overwrite, replication, blockSize, null);
+  }
+
+  /**
+   * Create a new dfs file with the specified block replication 
+   * with write-progress reporting and return an output stream for writing
+   * into the file.  
+   * 
+   * @param src stream name
+   * @param overwrite do not check for file existence if true
+   * @param replication block replication
+   * @return output stream
+   * @throws IOException
+   */
+  public OutputStream create( UTF8 src, 
+                              boolean overwrite, 
+                              short replication,
+                              long blockSize,
+                              Progressable progress
+                              ) throws IOException {
+    checkOpen();
+    OutputStream result = new DFSOutputStream(src, overwrite, 
+                                              replication, blockSize, progress);
+    synchronized (pendingCreates) {
+      pendingCreates.put(src.toString(), result);
     }
+    return result;
+  }
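
A minimal usage sketch of the create/open API above (illustrative only: the helper class, the path, and the assumption of an already-constructed DFSClient talking to a live namenode are not part of this file):

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.io.UTF8;

    // Hedged sketch; assumes it lives in the same package as DFSClient.
    class DFSClientUsageSketch {
      static void roundTrip(DFSClient client, byte[] data) throws IOException {
        OutputStream out = client.create(new UTF8("/tmp/example"), true);  // registered in pendingCreates
        out.write(data);
        out.close();

        InputStream in = client.open(new UTF8("/tmp/example"));            // block list fetched from the namenode
        int firstByte = in.read();                                         // served by the best reachable datanode
        in.close();
      }
    }
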
+  /**
+   * Set replication for an existing file.
+   * 
+   * @see ClientProtocol#setReplication(String, short)
+   * @param replication
+   * @throws IOException
+   * @return true if successful or false if the file does not exist
+   * @author shv
+   */
+  public boolean setReplication(UTF8 src, 
+                                short replication
+                                ) throws IOException {
+    return namenode.setReplication(src.toString(), replication);
+  }
+
+  /**
+   * Make a direct connection to namenode and manipulate structures
+   * there.
+   */
+  public boolean rename(UTF8 src, UTF8 dst) throws IOException {
+    checkOpen();
+    return namenode.rename(src.toString(), dst.toString());
+  }
+
+  /**
+   * Make a direct connection to namenode and manipulate structures
+   * there.
+   */
+  public boolean delete(UTF8 src) throws IOException {
+    checkOpen();
+    return namenode.delete(src.toString());
+  }
+
+  /**
+   */
+  public boolean exists(UTF8 src) throws IOException {
+    checkOpen();
+    return namenode.exists(src.toString());
+  }
+
+  /**
+   */
+  public boolean isDirectory(UTF8 src) throws IOException {
+    checkOpen();
+    return namenode.isDir(src.toString());
+  }
+
+  /**
+   */
+  public DFSFileInfo[] listPaths(UTF8 src) throws IOException {
+    checkOpen();
+    return namenode.getListing(src.toString());
+  }
+
+  /**
+   */
+  public long totalRawCapacity() throws IOException {
+    long rawNums[] = namenode.getStats();
+    return rawNums[0];
+  }
+
+  /**
+   */
+  public long totalRawUsed() throws IOException {
+    long rawNums[] = namenode.getStats();
+    return rawNums[1];
+  }
+
+  public DatanodeInfo[] datanodeReport() throws IOException {
+    return namenode.getDatanodeReport();
+  }
     
-    public long getBlockSize(UTF8 f) throws IOException {
-      int retries = 4;
-      while (true) {
+  /**
+   * Enter, leave or get safe mode.
+   * See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)} 
+   * for more details.
+   * 
+   * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
+   */
+  public boolean setSafeMode( SafeModeAction action ) throws IOException {
+    return namenode.setSafeMode( action );
+  }
+
+  /**
+   * Refresh the hosts and exclude files.  (Rereads them.)
+   * See {@link ClientProtocol#refreshNodes()} 
+   * for more details.
+   * 
+   * @see ClientProtocol#refreshNodes()
+   */
+  public void refreshNodes() throws IOException {
+    namenode.refreshNodes();
+  }
+
+  /**
+   * Dumps DFS data structures into specified file.
+   * See {@link ClientProtocol#metaSave()} 
+   * for more details.
+   * 
+   * @see ClientProtocol#metaSave()
+   */
+  public void metaSave(String pathname) throws IOException {
+    namenode.metaSave(pathname);
+  }
+    
+  /**
+   * @see ClientProtocol#finalizeUpgrade()
+   */
+  public void finalizeUpgrade() throws IOException {
+    namenode.finalizeUpgrade();
+  }
+
+  /**
+   */
+  public boolean mkdirs(UTF8 src) throws IOException {
+    checkOpen();
+    return namenode.mkdirs(src.toString());
+  }
+
+  /**
+   */
+  public void lock(UTF8 src, boolean exclusive) throws IOException {
+    long start = System.currentTimeMillis();
+    boolean hasLock = false;
+    while (! hasLock) {
+      hasLock = namenode.obtainLock(src.toString(), clientName, exclusive);
+      if (! hasLock) {
         try {
-          return namenode.getBlockSize(f.toString());
-        } catch (IOException ie) {
-          if (--retries == 0) {
-            LOG.warn("Problem getting block size: " + 
-                      StringUtils.stringifyException(ie));
-            throw ie;
+          Thread.sleep(400);
+          if (System.currentTimeMillis() - start > 5000) {
+            LOG.info("Waiting to retry lock for " + (System.currentTimeMillis() - start) + " ms.");
+            Thread.sleep(2000);
           }
-          LOG.debug("Problem getting block size: " + 
-                    StringUtils.stringifyException(ie));
+        } catch (InterruptedException ie) {
         }
       }
     }
-
+  }
+
+  /**
+   *
+   */
+  public void release(UTF8 src) throws IOException {
+    boolean hasReleased = false;
+    while (! hasReleased) {
+      hasReleased = namenode.releaseLock(src.toString(), clientName);
+      if (! hasReleased) {
+        LOG.info("Could not release.  Retrying...");
+        try {
+          Thread.sleep(2000);
+        } catch (InterruptedException ie) {
+        }
+      }
+    }
+  }
+
+  /**
+   * Pick the best node from which to stream the data.
+   * Entries in <i>nodes</i> are already in the priority order
+   */
+  private DatanodeInfo bestNode(DatanodeInfo nodes[], TreeSet deadNodes) throws IOException {
+    if (nodes != null) { 
+      for (int i = 0; i < nodes.length; i++) {
+        if (!deadNodes.contains(nodes[i])) {
+          return nodes[i];
+        }
+      }
+    }
+    throw new IOException("No live nodes contain current block");
+  }
+
+  /***************************************************************
+   * Periodically check in with the namenode and renew all the leases
+   * when the lease period is half over.
+   ***************************************************************/
+  class LeaseChecker implements Runnable {
     /**
-     * Report corrupt blocks that were discovered by the client.
      */
-    public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
-      namenode.reportBadBlocks(blocks);
+    public void run() {
+      long lastRenewed = 0;
+      while (running) {
+        if (System.currentTimeMillis() - lastRenewed > (LEASE_SOFTLIMIT_PERIOD / 2)) {
+          try {
+            if( pendingCreates.size() > 0 )
+              namenode.renewLease(clientName);
+            lastRenewed = System.currentTimeMillis();
+          } catch (IOException ie) {
+            String err = StringUtils.stringifyException(ie);
+            LOG.warn("Problem renewing lease for " + clientName +
+                     ": " + err);
+          }
+        }
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException ie) {
+        }
+      }
     }
-  
-    public short getDefaultReplication() {
-      return defaultReplication;
+  }
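
Net effect of the loop above: while pendingCreates is non-empty, the lease is renewed roughly twice per soft-limit period, so a healthy client never lets the namenode's soft limit expire. A standalone sketch of just the timing test (the 60 s value is an assumption standing in for LEASE_SOFTLIMIT_PERIOD):

    // Hedged sketch of the renewal condition; limit value assumed for illustration.
    long leaseSoftLimitMs = 60 * 1000L;
    long lastRenewed = 0L;                 // ms timestamp of the last successful renewal
    boolean havePendingCreates = true;     // stands in for pendingCreates.size() > 0
    boolean shouldRenew = havePendingCreates
        && (System.currentTimeMillis() - lastRenewed > leaseSoftLimitMs / 2);
    // With a 60 s soft limit the client renews about every 30 s while writes are open.
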
+
+  /** Utility class to encapsulate data node info and its ip address. */
+  private static class DNAddrPair {
+    DatanodeInfo info;
+    InetSocketAddress addr;
+    DNAddrPair(DatanodeInfo info, InetSocketAddress addr) {
+      this.info = info;
+      this.addr = addr;
     }
-    
+  }
+        
+  /****************************************************************
+   * DFSInputStream provides bytes from a named file.  It handles 
+   * negotiation of the namenode and various datanodes as necessary.
+   ****************************************************************/
+  class DFSInputStream extends FSInputStream {
+    private Socket s = null;
+    boolean closed = false;
+
+    private String src;
+    private DataInputStream blockStream;
+    private Block blocks[] = null;
+    private DatanodeInfo nodes[][] = null;
+    private DatanodeInfo currentNode = null;
+    private Block currentBlock = null;
+    private long pos = 0;
+    private long filelen = 0;
+    private long blockEnd = -1;
+    private TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
+        
     /**
-     * Get hints about the location of the indicated block(s).  The
-     * array returned is as long as there are blocks in the indicated
-     * range.  Each block may have one or more locations.
      */
-    public String[][] getHints(UTF8 src, long start, long len) throws IOException {
-        return namenode.getHints(src.toString(), start, len);
+    public DFSInputStream(String src) throws IOException {
+      this.src = src;
+      openInfo();
+      this.blockStream = null;
+      for (int i = 0; i < blocks.length; i++) {
+        this.filelen += blocks[i].getNumBytes();
+      }
     }
 
     /**
-     * Create an input stream that obtains a nodelist from the
-     * namenode, and then reads from all the right places.  Creates
-     * inner subclass of InputStream that does the right out-of-band
-     * work.
+     * Grab the open-file info from namenode
      */
-    public DFSInputStream open(UTF8 src) throws IOException {
-        checkOpen();
-        //    Get block info from namenode
-        return new DFSInputStream(src.toString());
+    synchronized void openInfo() throws IOException {
+      Block oldBlocks[] = this.blocks;
+
+      LocatedBlock results[] = namenode.open(src);            
+      Vector blockV = new Vector();
+      Vector nodeV = new Vector();
+      for (int i = 0; i < results.length; i++) {
+        blockV.add(results[i].getBlock());
+        nodeV.add(results[i].getLocations());
+      }
+      Block newBlocks[] = (Block[]) blockV.toArray(new Block[blockV.size()]);
+
+      if (oldBlocks != null) {
+        for (int i = 0; i < oldBlocks.length; i++) {
+          if (! oldBlocks[i].equals(newBlocks[i])) {
+            throw new IOException("Blocklist for " + src + " has changed!");
+          }
+        }
+        if (oldBlocks.length != newBlocks.length) {
+          throw new IOException("Blocklist for " + src + " now has different length");
+        }
+      }
+      this.blocks = newBlocks;
+      this.nodes = (DatanodeInfo[][]) nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
+      this.currentNode = null;
     }
 
     /**
-     * Create a new dfs file and return an output stream for writing into it. 
-     * 
-     * @param src stream name
-     * @param overwrite do not check for file existence if true
-     * @return output stream
-     * @throws IOException
+     * Returns the datanode from which the stream is currently reading.
      */
-    public OutputStream create( UTF8 src, 
-                                  boolean overwrite
-                                ) throws IOException {
-      return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+    public DatanodeInfo getCurrentDatanode() {
+      return currentNode;
     }
-    
+
     /**
-     * Create a new dfs file and return an output stream for writing into it
-     * with write-progress reporting. 
-     * 
-     * @param src stream name
-     * @param overwrite do not check for file existence if true
-     * @return output stream
-     * @throws IOException
+     * Returns the block containing the target position. 
      */
-    public OutputStream create( UTF8 src, 
-                                  boolean overwrite,
-                                  Progressable progress
-                                ) throws IOException {
-      return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+    public Block getCurrentBlock() {
+      return currentBlock;
     }
-    
+
+
     /**
-     * Create a new dfs file with the specified block replication 
-     * and return an output stream for writing into the file.  
-     * 
-     * @param src stream name
-     * @param overwrite do not check for file existence if true
-     * @param replication block replication
-     * @return output stream
-     * @throws IOException
+     * Used by the automatic tests to determine block locations of a
+     * file
      */
-    public OutputStream create( UTF8 src, 
-                                  boolean overwrite, 
-                                  short replication,
-                                  long blockSize
-                                ) throws IOException {
-      return create(src, overwrite, replication, blockSize, null);
+    synchronized DatanodeInfo[][] getDataNodes() {
+      return nodes;
     }
 
     /**
-     * Create a new dfs file with the specified block replication 
-     * with write-progress reporting and return an output stream for writing
-     * into the file.  
-     * 
-     * @param src stream name
-     * @param overwrite do not check for file existence if true
-     * @param replication block replication
-     * @return output stream
-     * @throws IOException
+     * Open a DataInputStream to a DataNode so that it can be read from.
+     * We get block ID and the IDs of the destinations at startup, from the namenode.
      */
-    public OutputStream create( UTF8 src, 
-                                  boolean overwrite, 
-                                  short replication,
-                                  long blockSize,
-                                  Progressable progress
-                                ) throws IOException {
-      checkOpen();
-      OutputStream result = new DFSOutputStream(src, overwrite, 
-                                                  replication, blockSize, progress);
-      synchronized (pendingCreates) {
-        pendingCreates.put(src.toString(), result);
+    private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
+      if (target >= filelen) {
+        throw new IOException("Attempted to read past end of file");
       }
-      return result;
-    }
-    /**
-     * Set replication for an existing file.
-     * 
-     * @see ClientProtocol#setReplication(String, short)
-     * @param replication
-     * @throws IOException
-     * @return true if successful or false if the file does not exist
-     * @author shv
-     */
-    public boolean setReplication(UTF8 src, 
-                                  short replication
-                                ) throws IOException {
-      return namenode.setReplication(src.toString(), replication);
-    }
 
-    /**
-     * Make a direct connection to namenode and manipulate structures
-     * there.
-     */
-    public boolean rename(UTF8 src, UTF8 dst) throws IOException {
-        checkOpen();
-        return namenode.rename(src.toString(), dst.toString());
-    }
+      if (s != null) {
+        s.close();
+        s = null;
+      }
 
-    /**
-     * Make a direct connection to namenode and manipulate structures
-     * there.
-     */
-    public boolean delete(UTF8 src) throws IOException {
-        checkOpen();
-        return namenode.delete(src.toString());
+      //
+      // Compute desired block
+      //
+      int targetBlock = -1;
+      long targetBlockStart = 0;
+      long targetBlockEnd = 0;
+      for (int i = 0; i < blocks.length; i++) {
+        long blocklen = blocks[i].getNumBytes();
+        targetBlockEnd = targetBlockStart + blocklen - 1;
+
+        if (target >= targetBlockStart && target <= targetBlockEnd) {
+          targetBlock = i;
+          break;
+        } else {
+          targetBlockStart = targetBlockEnd + 1;                    
+        }
+      }
+      if (targetBlock < 0) {
+        throw new IOException("Impossible situation: could not find target position " + target);
+      }
+      long offsetIntoBlock = target - targetBlockStart;
+
+      //
+      // Connect to best DataNode for desired Block, with potential offset
+      //
+      DatanodeInfo chosenNode = null;
+      while (s == null) {
+        DNAddrPair retval = chooseDataNode(targetBlock);
+        chosenNode = retval.info;
+        InetSocketAddress targetAddr = retval.addr;
+
+        try {
+          s = new Socket();
+          s.connect(targetAddr, READ_TIMEOUT);
+          s.setSoTimeout(READ_TIMEOUT);
+
+          //
+          // Xmit header info to datanode
+          //
+          DataOutputStream out = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
+          out.write(OP_READSKIP_BLOCK);
+          blocks[targetBlock].write(out);
+          out.writeLong(offsetIntoBlock);
+          out.flush();
+
+          //
+          // Get bytes in block, set streams
+          //
+          DataInputStream in = new DataInputStream(new BufferedInputStream(s.getInputStream()));
+          long curBlockSize = in.readLong();
+          long amtSkipped = in.readLong();
+          if (curBlockSize != blocks[targetBlock].len) {
+            throw new IOException("Recorded block size is " + blocks[targetBlock].len + ", but datanode reports size of " + curBlockSize);
+          }
+          if (amtSkipped != offsetIntoBlock) {
+            throw new IOException("Asked for offset of " + offsetIntoBlock + ", but only received offset of " + amtSkipped);
+          }
+
+          this.pos = target;
+          this.blockEnd = targetBlockEnd;
+          this.currentBlock = blocks[targetBlock];
+          this.blockStream = in;
+          return chosenNode;
+        } catch (IOException ex) {
+          // Put chosen node into dead list, continue
+          LOG.debug("Failed to connect to " + targetAddr + ":" 
+                    + StringUtils.stringifyException(ex));
+          deadNodes.add(chosenNode);
+          if (s != null) {
+            try {
+              s.close();
+            } catch (IOException iex) {
+            }                        
+          }
+          s = null;
+        }
+      }
+      return chosenNode;
     }
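
The block-selection loop in blockSeekTo() reduces to: walk the block list, accumulating a running start offset, until the target offset falls inside a block's [start, end] range; the remainder becomes the offset sent to the datanode. A self-contained sketch of that arithmetic, with invented block lengths:

    // Hedged sketch: map an absolute file offset to (block index, offset within the block).
    long MB = 1024L * 1024;
    long[] blockLens = { 64 * MB, 64 * MB, 10 * MB };
    long target = 70 * MB;                         // lands in the second block

    int targetBlock = -1;
    long blockStart = 0;
    for (int i = 0; i < blockLens.length; i++) {
      long blockEnd = blockStart + blockLens[i] - 1;
      if (target >= blockStart && target <= blockEnd) {
        targetBlock = i;                           // 1
        break;
      }
      blockStart = blockEnd + 1;
    }
    long offsetIntoBlock = target - blockStart;    // 6 MB into block 1
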
 
     /**
+     * Close it down!
      */
-    public boolean exists(UTF8 src) throws IOException {
-        checkOpen();
-        return namenode.exists(src.toString());
+    public synchronized void close() throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+
+      if (s != null) {
+        blockStream.close();
+        s.close();
+        s = null;
+      }
+      super.close();
+      closed = true;
     }
 
     /**
+     * Basic read()
      */
-    public boolean isDirectory(UTF8 src) throws IOException {
-        checkOpen();
-        return namenode.isDir(src.toString());
+    public synchronized int read() throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+      int result = -1;
+      if (pos < filelen) {
+        if (pos > blockEnd) {
+          currentNode = blockSeekTo(pos);
+        }
+        result = blockStream.read();
+        if (result >= 0) {
+          pos++;
+        }
+      }
+      return result;
     }
 
     /**
+     * Read up to len bytes into the given buffer.
      */
-    public DFSFileInfo[] listPaths(UTF8 src) throws IOException {
-        checkOpen();
-        return namenode.getListing(src.toString());
+    public synchronized int read(byte buf[], int off, int len) throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+      if (pos < filelen) {
+        int retries = 2;
+        while (retries > 0) {
+          try {
+            if (pos > blockEnd) {
+              currentNode = blockSeekTo(pos);
+            }
+            int realLen = Math.min(len, (int) (blockEnd - pos + 1));
+            int result = blockStream.read(buf, off, realLen);
+            if (result >= 0) {
+              pos += result;
+            }
+            return result;
+          } catch (IOException e) {
+            if (retries == 1) {
+              LOG.warn("DFS Read: " + StringUtils.stringifyException(e));
+            }
+            blockEnd = -1;
+            if (currentNode != null) { deadNodes.add(currentNode); }
+            if (--retries == 0) {
+              throw e;
+            }
+          }
+        }
+      }
+      return -1;
     }
 
+        
+    private DNAddrPair chooseDataNode(int blockId)
+      throws IOException {
+      int failures = 0;
+      while (true) {
+        try {
+          DatanodeInfo chosenNode = bestNode(nodes[blockId], deadNodes);
+          InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getName());
+          return new DNAddrPair(chosenNode, targetAddr);
+        } catch (IOException ie) {
+          String blockInfo =
+            blocks[blockId]+" file="+src;
+          if (failures >= MAX_BLOCK_ACQUIRE_FAILURES) {
+            throw new IOException("Could not obtain block: " + blockInfo);
+          }
+          if (nodes[blockId] == null || nodes[blockId].length == 0) {
+            LOG.info("No node available for block: " + blockInfo);
+          }
+          LOG.info("Could not obtain block " + blockId + " from any node:  " + ie);
+          try {
+            Thread.sleep(3000);
+          } catch (InterruptedException iex) {
+          }
+          deadNodes.clear(); //2nd option is to remove only nodes[blockId]
+          openInfo();
+          failures++;
+          continue;
+        }
+      }
+    } 
+        
+    private void fetchBlockByteRange(int blockId, long start,
+                                     long end, byte[] buf, int offset) throws IOException {
+      //
+      // Connect to best DataNode for desired Block, with potential offset
+      //
+      Socket dn = null;
+      while (dn == null) {
+        DNAddrPair retval = chooseDataNode(blockId);
+        DatanodeInfo chosenNode = retval.info;
+        InetSocketAddress targetAddr = retval.addr;
+            
+        try {
+          dn = new Socket();
+          dn.connect(targetAddr, READ_TIMEOUT);
+          dn.setSoTimeout(READ_TIMEOUT);
+              
+          //
+          // Xmit header info to datanode
+          //
+          DataOutputStream out = new DataOutputStream(new BufferedOutputStream(dn.getOutputStream()));
+          out.write(OP_READ_RANGE_BLOCK);
+          blocks[blockId].write(out);
+          out.writeLong(start);
+          out.writeLong(end);
+          out.flush();
+              
+          //
+          // Get bytes in block, set streams
+          //
+          DataInputStream in = new DataInputStream(new BufferedInputStream(dn.getInputStream()));
+          long curBlockSize = in.readLong();
+          long actualStart = in.readLong();
+          long actualEnd = in.readLong();
+          if (curBlockSize != blocks[blockId].len) {
+            throw new IOException("Recorded block size is " +
+                                  blocks[blockId].len + ", but datanode reports size of " +
+                                  curBlockSize);
+          }
+          if ((actualStart != start) || (actualEnd != end)) {
+            throw new IOException("Asked for byte range  " + start +
+                                  "-" + end + ", but only received range " + actualStart +
+                                  "-" + actualEnd);
+          }
+          int nread = in.read(buf, offset, (int)(end - start + 1));
+        } catch (IOException ex) {
+          // Put chosen node into dead list, continue
+          LOG.debug("Failed to connect to " + targetAddr + ":" 
+                    + StringUtils.stringifyException(ex));
+          deadNodes.add(chosenNode);
+          if (dn != null) {
+            try {
+              dn.close();
+            } catch (IOException iex) {
+            }
+          }
+          dn = null;
+        }
+      }
+    }
+        
+    public int read(long position, byte[] buf, int off, int len)
+      throws IOException {
+      // sanity checks
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+      if ((position < 0) || (position > filelen)) {
+        return -1;
+      }
+      int realLen = len;
+      if ((position + len) > filelen) {
+        realLen = (int)(filelen - position);
+      }
+      // determine the block and byte range within the block
+      // corresponding to position and realLen
+      int targetBlock = -1;
+      long targetStart = 0;
+      long targetEnd = 0;
+      for (int idx = 0; idx < blocks.length; idx++) {
+        long blocklen = blocks[idx].getNumBytes();
+        targetEnd = targetStart + blocklen - 1;
+        if (position >= targetStart && position <= targetEnd) {
+          targetBlock = idx;
+          targetStart = position - targetStart;
+          targetEnd = Math.min(blocklen, targetStart + realLen) - 1;
+          realLen = (int)(targetEnd - targetStart + 1);
+          break;
+        }
+        targetStart += blocklen;
+      }
+      if (targetBlock < 0) {
+        throw new IOException(
+                              "Impossible situation: could not find target position "+
+                              position);
+      }
+      fetchBlockByteRange(targetBlock, targetStart, targetEnd, buf, off);
+      return realLen;
+    }
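
To make the clamping in the positional read concrete (block sizes invented): with blocks of 64 MB, 64 MB and 10 MB (file length 138 MB), read(position = 130 MB, len = 32 MB) is first clamped to the 8 MB left in the file and then confined to the single block containing the position, so one fetchBlockByteRange() call covers bytes 2 MB through 10 MB of the third block and the method returns 8 MB.

    // Hedged numeric sketch of the clamping performed before fetchBlockByteRange().
    long MB = 1024L * 1024;
    long filelen = 64 * MB + 64 * MB + 10 * MB;    // 138 MB
    long position = 130 * MB;
    long len = 32 * MB;

    long realLen = Math.min(len, filelen - position);         // 8 MB
    long startWithinBlock = position - (64 * MB + 64 * MB);   // 2 MB into the third block
    long endWithinBlock = startWithinBlock + realLen - 1;     // last byte of that block
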
+        
     /**
+     * Seek to a new arbitrary location
      */
-    public long totalRawCapacity() throws IOException {
-        long rawNums[] = namenode.getStats();
-        return rawNums[0];
+    public synchronized void seek(long targetPos) throws IOException {
+      if (targetPos > filelen) {
+        throw new IOException("Cannot seek after EOF");
+      }
+      boolean done = false;
+      if (pos <= targetPos && targetPos <= blockEnd) {
+        //
+        // If this seek is to a positive position in the current
+        // block, and this piece of data might already be lying in
+        // the TCP buffer, then just eat up the intervening data.
+        //
+        int diff = (int)(targetPos - pos);
+        if (diff <= TCP_WINDOW_SIZE) {
+          blockStream.skipBytes(diff);
+          pos += diff;
+          assert(pos == targetPos);
+          done = true;
+        }
+      }
+      if (!done) {
+        pos = targetPos;
+        blockEnd = -1;
+      }
     }
 
     /**
+     * Seek to given position on a node other than the current node.  If
+     * a node other than the current node is found, then returns true. 
+     * If another node could not be found, then returns false.
      */
-    public long totalRawUsed() throws IOException {
-        long rawNums[] = namenode.getStats();
-        return rawNums[1];
-    }
-
-    public DatanodeInfo[] datanodeReport() throws IOException {
-        return namenode.getDatanodeReport();
+    public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+      boolean markedDead = deadNodes.contains(currentNode);
+      deadNodes.add(currentNode);
+      DatanodeInfo oldNode = currentNode;
+      DatanodeInfo newNode = blockSeekTo(targetPos);
+      if ( !markedDead ) {
+        /* remove it from deadNodes. blockSeekTo could have cleared 
+         * deadNodes and added currentNode again. That's ok. */
+        deadNodes.remove(oldNode);
+      }
+      if (!oldNode.getStorageID().equals(newNode.getStorageID())) {
+        currentNode = newNode;
+        return true;
+      } else {
+        return false;
+      }
     }
-    
+        
     /**
-     * Enter, leave or get safe mode.
-     * See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)} 
-     * for more details.
-     * 
-     * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
      */
-    public boolean setSafeMode( SafeModeAction action ) throws IOException {
-      return namenode.setSafeMode( action );
+    public synchronized long getPos() throws IOException {
+      return pos;
     }
 
     /**
-     * Refresh the hosts and exclude files.  (Rereads them.)
-     * See {@link ClientProtocol#refreshNodes()} 
-     * for more details.
-     * 
-     * @see ClientProtocol#refreshNodes()
      */
-    public void refreshNodes() throws IOException {
-      namenode.refreshNodes();
+    public synchronized int available() throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+      return (int) (filelen - pos);
     }
 
     /**
-     * Dumps DFS data structures into specified file.
-     * See {@link ClientProtocol#metaSave()} 
-     * for more details.
-     * 
-     * @see ClientProtocol#metaSave()
+     * We definitely don't support marks
      */
-    public void metaSave(String pathname) throws IOException {
-      namenode.metaSave(pathname);
+    public boolean markSupported() {
+      return false;
+    }
+    public void mark(int readLimit) {
     }
+    public void reset() throws IOException {
+      throw new IOException("Mark not supported");
+    }
+  }
     
-    /**
-     * @see ClientProtocol#finalizeUpgrade()
-     */
-    public void finalizeUpgrade() throws IOException {
-      namenode.finalizeUpgrade();
+  static class DFSDataInputStream extends FSDataInputStream {
+    DFSDataInputStream(DFSInputStream in, Configuration conf)
+      throws IOException {
+      super(in, conf);
     }
-
+      
+    DFSDataInputStream(DFSInputStream in, int bufferSize) throws IOException {
+      super(in, bufferSize);
+    }
+      
     /**
+     * Returns the datanode from which the stream is currently reading.
      */
-    public boolean mkdirs(UTF8 src) throws IOException {
-        checkOpen();
-        return namenode.mkdirs(src.toString());
+    public DatanodeInfo getCurrentDatanode() {
+      return ((DFSInputStream)inStream).getCurrentDatanode();
     }
-
+      
     /**
+     * Returns the block containing the target position. 
      */
-    public void lock(UTF8 src, boolean exclusive) throws IOException {
-        long start = System.currentTimeMillis();
-        boolean hasLock = false;
-        while (! hasLock) {
-            hasLock = namenode.obtainLock(src.toString(), clientName, exclusive);
-            if (! hasLock) {
-                try {
-                    Thread.sleep(400);
-                    if (System.currentTimeMillis() - start > 5000) {
-                        LOG.info("Waiting to retry lock for " + (System.currentTimeMillis() - start) + " ms.");
-                        Thread.sleep(2000);
-                    }
-                } catch (InterruptedException ie) {
-                }
-            }
-        }
+    public Block getCurrentBlock() {
+      return ((DFSInputStream)inStream).getCurrentBlock();
     }
 
     /**
-     *
+     * Used by the automatic tests to determine block locations of a
+     * file
      */
-    public void release(UTF8 src) throws IOException {
-        boolean hasReleased = false;
-        while (! hasReleased) {
-            hasReleased = namenode.releaseLock(src.toString(), clientName);
-            if (! hasReleased) {
-                LOG.info("Could not release.  Retrying...");
-                try {
-                    Thread.sleep(2000);
-                } catch (InterruptedException ie) {
-                }
-            }
-        }
+    synchronized DatanodeInfo[][] getDataNodes() {
+      return ((DFSInputStream)inStream).getDataNodes();
     }
 
+  }
+
+  /****************************************************************
+   * DFSOutputStream creates files from a stream of bytes.
+   ****************************************************************/
+  class DFSOutputStream extends OutputStream {
+    private Socket s;
+    boolean closed = false;
+
+    private byte outBuf[] = new byte[BUFFER_SIZE];
+    private int pos = 0;
+
+    private UTF8 src;
+    private boolean overwrite;
+    private short replication;
+    private boolean firstTime = true;
+    private DataOutputStream blockStream;
+    private DataInputStream blockReplyStream;
+    private File backupFile;
+    private OutputStream backupStream;
+    private Block block;
+    private long filePos = 0;
+    private int bytesWrittenToBlock = 0;
+    private String datanodeName;
+    private long blockSize;
+
+    private Progressable progress;
     /**
-     * Pick the best node from which to stream the data.
-     * Entries in <i>nodes</i> are already in the priority order
+     * Create a new output stream to the given DataNode.
      */
-    private DatanodeInfo bestNode(DatanodeInfo nodes[], TreeSet deadNodes) throws IOException {
-      if (nodes != null) { 
-        for (int i = 0; i < nodes.length; i++) {
-          if (!deadNodes.contains(nodes[i])) {
-            return nodes[i];
-          }
-        }
+    public DFSOutputStream(UTF8 src, boolean overwrite, 
+                           short replication, long blockSize,
+                           Progressable progress
+                           ) throws IOException {
+      this.src = src;
+      this.overwrite = overwrite;
+      this.replication = replication;
+      this.backupFile = newBackupFile();
+      this.blockSize = blockSize;
+      this.backupStream = new FileOutputStream(backupFile);
+      this.progress = progress;
+      if (progress != null) {
+        LOG.debug("Set non-null progress callback on DFSOutputStream "+src);
       }
-        throw new IOException("No live nodes contain current block");
     }
 
-    /***************************************************************
-     * Periodically check in with the namenode and renew all the leases
-     * when the lease period is half over.
-     ***************************************************************/
-    class LeaseChecker implements Runnable {
-        /**
-         */
-        public void run() {
-            long lastRenewed = 0;
-            while (running) {
-                if (System.currentTimeMillis() - lastRenewed > (LEASE_SOFTLIMIT_PERIOD / 2)) {
-                    try {
-                      if( pendingCreates.size() > 0 )
-                        namenode.renewLease(clientName);
-                      lastRenewed = System.currentTimeMillis();
-                    } catch (IOException ie) {
-                      String err = StringUtils.stringifyException(ie);
-                      LOG.warn("Problem renewing lease for " + clientName +
-                                  ": " + err);
-                    }
-                }
-                try {
-                    Thread.sleep(1000);
-                } catch (InterruptedException ie) {
-                }
-            }
-        }
+    /* Wrapper for closing backupStream. This sets backupStream to null so
+     * that we do not attempt to write to a backupStream that could be
+     * invalid in subsequent writes. Otherwise we might end up trying to
+     * write to a file descriptor that we don't own.
+     */
+    private void closeBackupStream() throws IOException {
+      if ( backupStream != null ) {
+        OutputStream stream = backupStream;
+        backupStream = null;
+        stream.close();
+      }   
     }
-
-    /** Utility class to encapsulate data node info and its ip address. */
-    private static class DNAddrPair {
-      DatanodeInfo info;
-      InetSocketAddress addr;
-      DNAddrPair(DatanodeInfo info, InetSocketAddress addr) {
-        this.info = info;
-        this.addr = addr;
+    /* Similar to closeBackupStream(). Theoretically, deleting a file
+     * twice could result in deleting a file that we should not.
+     */
+    private void deleteBackupFile() {
+      if ( backupFile != null ) {
+        File file = backupFile;
+        backupFile = null;
+        file.delete();
       }
     }
         
-    /****************************************************************
-     * DFSInputStream provides bytes from a named file.  It handles 
-     * negotiation of the namenode and various datanodes as necessary.
-     ****************************************************************/
-    class DFSInputStream extends FSInputStream {
-        private Socket s = null;
-        boolean closed = false;
-
-        private String src;
-        private DataInputStream blockStream;
-        private Block blocks[] = null;
-        private DatanodeInfo nodes[][] = null;
-        private DatanodeInfo currentNode = null;
-        private Block currentBlock = null;
-        private long pos = 0;
-        private long filelen = 0;
-        private long blockEnd = -1;
-        private TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
-        
-        /**
-         */
-        public DFSInputStream(String src) throws IOException {
-            this.src = src;
-            openInfo();
-            this.blockStream = null;
-            for (int i = 0; i < blocks.length; i++) {
-                this.filelen += blocks[i].getNumBytes();
-            }
-        }
-
-        /**
-         * Grab the open-file info from namenode
-         */
-        synchronized void openInfo() throws IOException {
-            Block oldBlocks[] = this.blocks;
-
-            LocatedBlock results[] = namenode.open(src);            
-            Vector blockV = new Vector();
-            Vector nodeV = new Vector();
-            for (int i = 0; i < results.length; i++) {
-                blockV.add(results[i].getBlock());
-                nodeV.add(results[i].getLocations());
-            }
-            Block newBlocks[] = (Block[]) blockV.toArray(new Block[blockV.size()]);
-
-            if (oldBlocks != null) {
-                for (int i = 0; i < oldBlocks.length; i++) {
-                    if (! oldBlocks[i].equals(newBlocks[i])) {
-                        throw new IOException("Blocklist for " + src + " has changed!");
-                    }
-                }
-                if (oldBlocks.length != newBlocks.length) {
-                    throw new IOException("Blocklist for " + src + " now has different length");
-                }
-            }
-            this.blocks = newBlocks;
-            this.nodes = (DatanodeInfo[][]) nodeV.toArray(new DatanodeInfo[nodeV.size()][]);
-            this.currentNode = null;
-        }
-
-        /**
-         * Returns the datanode from which the stream is currently reading.
-         */
-        public DatanodeInfo getCurrentDatanode() {
-          return currentNode;
-        }
-
-        /**
-         * Returns the block containing the target position. 
-         */
-        public Block getCurrentBlock() {
-          return currentBlock;
-        }
-
-
-        /**
-         * Used by the automatic tests to determine block locations of a
-         * file
-         */
-        synchronized DatanodeInfo[][] getDataNodes() {
-          return nodes;
-        }
-
-        /**
-         * Open a DataInputStream to a DataNode so that it can be read from.
-         * We get block ID and the IDs of the destinations at startup, from the namenode.
-         */
-        private synchronized DatanodeInfo blockSeekTo(long target) throws IOException {
-            if (target >= filelen) {
-                throw new IOException("Attempted to read past end of file");
-            }
-
-            if (s != null) {
-                s.close();
-                s = null;
-            }
+    private File newBackupFile() throws IOException {
+      File result = conf.getFile("dfs.client.buffer.dir",
+                                 "tmp"+File.separator+
+                                 "client-"+Math.abs(r.nextLong()));
+      result.deleteOnExit();
+      return result;
+    }
 
-            //
-            // Compute desired block
-            //
-            int targetBlock = -1;
-            long targetBlockStart = 0;
-            long targetBlockEnd = 0;
-            for (int i = 0; i < blocks.length; i++) {
-                long blocklen = blocks[i].getNumBytes();
-                targetBlockEnd = targetBlockStart + blocklen - 1;
-
-                if (target >= targetBlockStart && target <= targetBlockEnd) {
-                    targetBlock = i;
-                    break;
-                } else {
-                    targetBlockStart = targetBlockEnd + 1;                    
-                }
-            }
-            if (targetBlock < 0) {
-                throw new IOException("Impossible situation: could not find target position " + target);
-            }
-            long offsetIntoBlock = target - targetBlockStart;
-
-            //
-            // Connect to best DataNode for desired Block, with potential offset
-            //
-            DatanodeInfo chosenNode = null;
-            while (s == null) {
-                DNAddrPair retval = chooseDataNode(targetBlock);
-                chosenNode = retval.info;
-                InetSocketAddress targetAddr = retval.addr;
-
-                try {
-                    s = new Socket();
-                    s.connect(targetAddr, READ_TIMEOUT);
-                    s.setSoTimeout(READ_TIMEOUT);
-
-                    //
-                    // Xmit header info to datanode
-                    //
-                    DataOutputStream out = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
-                    out.write(OP_READSKIP_BLOCK);
-                    blocks[targetBlock].write(out);
-                    out.writeLong(offsetIntoBlock);
-                    out.flush();
-
-                    //
-                    // Get bytes in block, set streams
-                    //
-                    DataInputStream in = new DataInputStream(new BufferedInputStream(s.getInputStream()));
-                    long curBlockSize = in.readLong();
-                    long amtSkipped = in.readLong();
-                    if (curBlockSize != blocks[targetBlock].len) {
-                        throw new IOException("Recorded block size is " + blocks[targetBlock].len + ", but datanode reports size of " + curBlockSize);
-                    }
-                    if (amtSkipped != offsetIntoBlock) {
-                        throw new IOException("Asked for offset of " + offsetIntoBlock + ", but only received offset of " + amtSkipped);
-                    }
-
-                    this.pos = target;
-                    this.blockEnd = targetBlockEnd;
-                    this.currentBlock = blocks[targetBlock];
-                    this.blockStream = in;
-                    return chosenNode;
-                } catch (IOException ex) {
-                    // Put chosen node into dead list, continue
-                    LOG.debug("Failed to connect to " + targetAddr + ":" 
-                              + StringUtils.stringifyException(ex));
-                    deadNodes.add(chosenNode);
-                    if (s != null) {
-                        try {
-                            s.close();
-                        } catch (IOException iex) {
-                        }                        
-                    }
-                    s = null;
-                }
-            }
-            return chosenNode;
+    /**
+     * Open a DataOutputStream to a DataNode so that it can be written to.
+     * This happens when a file is created and each time a new block is allocated.
+     * Must get block ID and the IDs of the destinations from the namenode.
+     */
+    private synchronized void nextBlockOutputStream() throws IOException {
+      boolean retry = false;
+      long startTime = System.currentTimeMillis();
+      do {
+        retry = false;
+                
+        LocatedBlock lb;
+        if (firstTime) {
+          lb = locateNewBlock();
+        } else {
+          lb = locateFollowingBlock(startTime);
         }
 
-        /**
-         * Close it down!
-         */
-        public synchronized void close() throws IOException {
-            checkOpen();
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
-
-            if (s != null) {
-                blockStream.close();
-                s.close();
-                s = null;
-            }
-            super.close();
-            closed = true;
+        block = lb.getBlock();
+        if ( block.getNumBytes() < bytesWrittenToBlock ) {
+          block.setNumBytes( bytesWrittenToBlock );
         }
+        DatanodeInfo nodes[] = lb.getLocations();
 
-        /**
-         * Basic read()
-         */
-        public synchronized int read() throws IOException {
-            checkOpen();
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
-            int result = -1;
-            if (pos < filelen) {
-                if (pos > blockEnd) {
-                   currentNode = blockSeekTo(pos);
-                }
-                result = blockStream.read();
-                if (result >= 0) {
-                    pos++;
-                }
+        //
+        // Connect to first DataNode in the list.  Abort if this fails.
+        //
+        InetSocketAddress target = DataNode.createSocketAddr(nodes[0].getName());
+        try {
+          s = new Socket();
+          s.connect(target, READ_TIMEOUT);
+          s.setSoTimeout(replication * READ_TIMEOUT);
+          datanodeName = nodes[0].getName();
+        } catch (IOException ie) {
+          // Connection failed.  Let's wait a little bit and retry
+          try {
+            if (System.currentTimeMillis() - startTime > 5000) {
+              LOG.info("Waiting to find target node: " + target);
             }
-            return result;
+            Thread.sleep(6000);
+          } catch (InterruptedException iex) {
+          }
+          if (firstTime) {
+            namenode.abandonFileInProgress(src.toString(), 
+                                           clientName);
+          } else {
+            namenode.abandonBlock(block, src.toString());
+          }
+          retry = true;
+          continue;
         }
 
-        /**
-         * Read the entire buffer.
-         */
-        public synchronized int read(byte buf[], int off, int len) throws IOException {
-            checkOpen();
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
-            if (pos < filelen) {
-              int retries = 2;
-              while (retries > 0) {
-                try {
-                  if (pos > blockEnd) {
-                      currentNode = blockSeekTo(pos);
-                  }
-                  int realLen = Math.min(len, (int) (blockEnd - pos + 1));
-                  int result = blockStream.read(buf, off, realLen);
-                  if (result >= 0) {
-                      pos += result;
-                  }
-                  return result;
-                } catch (IOException e) {
-                  if (retries == 1) {
-                    LOG.warn("DFS Read: " + StringUtils.stringifyException(e));
-                  }
-                  blockEnd = -1;
-                  if (currentNode != null) { deadNodes.add(currentNode); }
-                  if (--retries == 0) {
-                    throw e;
-                  }
-                }
-              }
-            }
-            return -1;
+        //
+        // Xmit header info to datanode
+        //
+        DataOutputStream out = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
+        out.write(OP_WRITE_BLOCK);
+        out.writeBoolean(true);
+        block.write(out);
+        out.writeInt(nodes.length);
+        for (int i = 0; i < nodes.length; i++) {
+          nodes[i].write(out);
         }
+        out.write(CHUNKED_ENCODING);
+        blockStream = out;
+        blockReplyStream = new DataInputStream(new BufferedInputStream(s.getInputStream()));
+      } while (retry);
+      firstTime = false;
+    }
 
-        
-        private DNAddrPair chooseDataNode(int blockId)
-        throws IOException {
-          int failures = 0;
-          while (true) {
-            try {
-              DatanodeInfo chosenNode = bestNode(nodes[blockId], deadNodes);
-              InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getName());
-              return new DNAddrPair(chosenNode, targetAddr);
-            } catch (IOException ie) {
-              String blockInfo =
-                  blocks[blockId]+" file="+src;
-              if (failures >= MAX_BLOCK_ACQUIRE_FAILURES) {
-                throw new IOException("Could not obtain block: " + blockInfo);
-              }
-              if (nodes[blockId] == null || nodes[blockId].length == 0) {
-                LOG.info("No node available for block: " + blockInfo);
-              }
-              LOG.info("Could not obtain block " + blockId + " from any node:  " + ie);
+    private LocatedBlock locateNewBlock() throws IOException {     
+      int retries = 3;
+      while (true) {
+        while (true) {
+          try {
+            return namenode.create(src.toString(), clientName.toString(),
+                                   overwrite, replication, blockSize);
+          } catch (RemoteException e) {
+            if (--retries == 0 || 
+                !AlreadyBeingCreatedException.class.getName().
+                equals(e.getClassName())) {
+              throw e;
+            } else {
+              // because failed tasks take up to LEASE_PERIOD to
+              // release their pendingCreates files, if the file
+              // we want to create is already being created, 
+              // wait and try again.
+              LOG.info(StringUtils.stringifyException(e));
               try {
-                Thread.sleep(3000);
-              } catch (InterruptedException iex) {
+                Thread.sleep(LEASE_SOFTLIMIT_PERIOD);
+              } catch (InterruptedException ie) {
               }
-              deadNodes.clear(); //2nd option is to remove only nodes[blockId]
-              openInfo();
-              failures++;
-              continue;
             }
           }
-        } 
+        }
+      }
+    }
         
-        private void fetchBlockByteRange(int blockId, long start,
-            long end, byte[] buf, int offset) throws IOException {
-          //
-          // Connect to best DataNode for desired Block, with potential offset
-          //
-          Socket dn = null;
-          while (dn == null) {
-            DNAddrPair retval = chooseDataNode(blockId);
-            DatanodeInfo chosenNode = retval.info;
-            InetSocketAddress targetAddr = retval.addr;
-            
-            try {
-              dn = new Socket();
-              dn.connect(targetAddr, READ_TIMEOUT);
-              dn.setSoTimeout(READ_TIMEOUT);
-              
-              //
-              // Xmit header info to datanode
-              //
-              DataOutputStream out = new DataOutputStream(new BufferedOutputStream(dn.getOutputStream()));
-              out.write(OP_READ_RANGE_BLOCK);
-              blocks[blockId].write(out);
-              out.writeLong(start);
-              out.writeLong(end);
-              out.flush();
-              
-              //
-              // Get bytes in block, set streams
-              //
-              DataInputStream in = new DataInputStream(new BufferedInputStream(dn.getInputStream()));
-              long curBlockSize = in.readLong();
-              long actualStart = in.readLong();
-              long actualEnd = in.readLong();
-              if (curBlockSize != blocks[blockId].len) {
-                throw new IOException("Recorded block size is " +
-                    blocks[blockId].len + ", but datanode reports size of " +
-                    curBlockSize);
-              }
-              if ((actualStart != start) || (actualEnd != end)) {
-                throw new IOException("Asked for byte range  " + start +
-                    "-" + end + ", but only received range " + actualStart +
-                    "-" + actualEnd);
+    private LocatedBlock locateFollowingBlock(long start
+                                              ) throws IOException {     
+      int retries = 5;
+      long sleeptime = 400;
+      while (true) {
+        long localstart = System.currentTimeMillis();
+        while (true) {
+          try {
+            return namenode.addBlock(src.toString(), 
+                                     clientName.toString());
+          } catch (RemoteException e) {
+            if (--retries == 0 || 
+                !NotReplicatedYetException.class.getName().
+                equals(e.getClassName())) {
+              throw e;
+            } else {
+              LOG.info(StringUtils.stringifyException(e));
+              if (System.currentTimeMillis() - localstart > 5000) {
+                LOG.info("Waiting for replication for " + 
+                         (System.currentTimeMillis() - localstart)/1000 + 
+                         " seconds");
               }
-              int nread = in.read(buf, offset, (int)(end - start + 1));
-            } catch (IOException ex) {
-              // Put chosen node into dead list, continue
-              LOG.debug("Failed to connect to " + targetAddr + ":" 
-                        + StringUtils.stringifyException(ex));
-              deadNodes.add(chosenNode);
-              if (dn != null) {
-                try {
-                  dn.close();
-                } catch (IOException iex) {
-                }
+              try {
+                LOG.debug("NotReplicatedYetException sleeping " + src +
+                          " retries left " + retries);
+                Thread.sleep(sleeptime);
+              } catch (InterruptedException ie) {
               }
-              dn = null;
-            }
-          }
-        }
-        
-        public int read(long position, byte[] buf, int off, int len)
-        throws IOException {
-          // sanity checks
-          checkOpen();
-          if (closed) {
-            throw new IOException("Stream closed");
-          }
-          if ((position < 0) || (position > filelen)) {
-            return -1;
+            }                
           }
-          int realLen = len;
-          if ((position + len) > filelen) {
-            realLen = (int)(filelen - position);
-          }
-          // determine the block and byte range within the block
-          // corresponding to position and realLen
-          int targetBlock = -1;
-          long targetStart = 0;
-          long targetEnd = 0;
-          for (int idx = 0; idx < blocks.length; idx++) {
-            long blocklen = blocks[idx].getNumBytes();
-            targetEnd = targetStart + blocklen - 1;
-            if (position >= targetStart && position <= targetEnd) {
-              targetBlock = idx;
-              targetStart = position - targetStart;
-              targetEnd = Math.min(blocklen, targetStart + realLen) - 1;
-              realLen = (int)(targetEnd - targetStart + 1);
-              break;
-            }
-            targetStart += blocklen;
-          }
-          if (targetBlock < 0) {
-            throw new IOException(
-                "Impossible situation: could not find target position "+
-                position);
-          }
-          fetchBlockByteRange(targetBlock, targetStart, targetEnd, buf, off);
-          return realLen;
-        }
-        
-        /**
-         * Seek to a new arbitrary location
-         */
-        public synchronized void seek(long targetPos) throws IOException {
-            if (targetPos > filelen) {
-                throw new IOException("Cannot seek after EOF");
-            }
-            boolean done = false;
-            if (pos <= targetPos && targetPos <= blockEnd) {
-                //
-                // If this seek is to a positive position in the current
-                // block, and this piece of data might already be lying in
-                // the TCP buffer, then just eat up the intervening data.
-                //
-                int diff = (int)(targetPos - pos);
-                if (diff <= TCP_WINDOW_SIZE) {
-                  blockStream.skipBytes(diff);
-                  pos += diff;
-                  assert(pos == targetPos);
-                  done = true;
-                }
-            }
-            if (!done) {
-                pos = targetPos;
-                blockEnd = -1;
-            }
-        }
-
-        /**
-         * Seek to given position on a node other than the current node.  If
-         * a node other than the current node is found, then returns true. 
-         * If another node could not be found, then returns false.
-         */
-        public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-            boolean markedDead = deadNodes.contains(currentNode);
-            deadNodes.add(currentNode);
-            DatanodeInfo oldNode = currentNode;
-            DatanodeInfo newNode = blockSeekTo(targetPos);
-            if ( !markedDead ) {
-                /* remove it from deadNodes. blockSeekTo could have cleared 
-                 * deadNodes and added currentNode again. Thats ok. */
-                deadNodes.remove(oldNode);
-            }
-            if (!oldNode.getStorageID().equals(newNode.getStorageID())) {
-                currentNode = newNode;
-                return true;
-            } else {
-                return false;
-            }
-        }
-        
-        /**
-         */
-        public synchronized long getPos() throws IOException {
-            return pos;
-        }
-
-        /**
-         */
-        public synchronized int available() throws IOException {
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
-            return (int) (filelen - pos);
         }
+      } 
+    }
 
-        /**
-         * We definitely don't support marks
-         */
-        public boolean markSupported() {
-            return false;
-        }
-        public void mark(int readLimit) {
-        }
-        public void reset() throws IOException {
-            throw new IOException("Mark not supported");
-        }
+    /**
+     * We're referring to the file pos here
+     */
+    public synchronized long getPos() throws IOException {
+      return filePos;
     }
-    
-    static class DFSDataInputStream extends FSDataInputStream {
-      DFSDataInputStream(DFSInputStream in, Configuration conf)
-      throws IOException {
-        super(in, conf);
+			
+    /**
+     * Writes the specified byte to this output stream.
+     */
+    public synchronized void write(int b) throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
       }
-      
-      DFSDataInputStream(DFSInputStream in, int bufferSize) throws IOException {
-        super(in, bufferSize);
+
+      if ((bytesWrittenToBlock + pos == blockSize) ||
+          (pos >= BUFFER_SIZE)) {
+        flush();
       }
-      
-      /**
-       * Returns the datanode from which the stream is currently reading.
-       */
-      public DatanodeInfo getCurrentDatanode() {
-        return ((DFSInputStream)inStream).getCurrentDatanode();
+      outBuf[pos++] = (byte) b;
+      filePos++;
+    }
+
+    /**
+     * Writes the specified bytes to this output stream.
+     */
+    public synchronized void write(byte b[], int off, int len)
+      throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
       }
-      
-      /**
-       * Returns the block containing the target position. 
-       */
-      public Block getCurrentBlock() {
-        return ((DFSInputStream)inStream).getCurrentBlock();
+      while (len > 0) {
+        int remaining = Math.min(BUFFER_SIZE - pos,
+                                 (int)((blockSize - bytesWrittenToBlock) - pos));
+        int toWrite = Math.min(remaining, len);
+        System.arraycopy(b, off, outBuf, pos, toWrite);
+        pos += toWrite;
+        off += toWrite;
+        len -= toWrite;
+        filePos += toWrite;
+
+        if ((bytesWrittenToBlock + pos >= blockSize) ||
+            (pos == BUFFER_SIZE)) {
+          flush();
+        }
       }
+    }
 
-      /**
-       * Used by the automatic tests to detemine blocks locations of a
-       * file
-       */
-      synchronized DatanodeInfo[][] getDataNodes() {
-        return ((DFSInputStream)inStream).getDataNodes();
+    /**
+     * Flush the buffer, getting a stream to a new block if necessary.
+     */
+    public synchronized void flush() throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
       }
 
+      if (bytesWrittenToBlock + pos >= blockSize) {
+        flushData((int) blockSize - bytesWrittenToBlock);
+      }
+      if (bytesWrittenToBlock == blockSize) {
+        endBlock();
+      }
+      flushData(pos);
     }
 
-    /****************************************************************
-     * DFSOutputStream creates files from a stream of bytes.
-     ****************************************************************/
-    class DFSOutputStream extends OutputStream {
-        private Socket s;
-        boolean closed = false;
-
-        private byte outBuf[] = new byte[BUFFER_SIZE];
-        private int pos = 0;
-
-        private UTF8 src;
-        private boolean overwrite;
-        private short replication;
-        private boolean firstTime = true;
-        private DataOutputStream blockStream;
-        private DataInputStream blockReplyStream;
-        private File backupFile;
-        private OutputStream backupStream;
-        private Block block;
-        private long filePos = 0;
-        private int bytesWrittenToBlock = 0;
-        private String datanodeName;
-        private long blockSize;
-
-        private Progressable progress;
-        /**
-         * Create a new output stream to the given DataNode.
-         */
-        public DFSOutputStream(UTF8 src, boolean overwrite, 
-                               short replication, long blockSize,
-                               Progressable progress
-                               ) throws IOException {
-            this.src = src;
-            this.overwrite = overwrite;
-            this.replication = replication;
-            this.backupFile = newBackupFile();
-            this.blockSize = blockSize;
-            this.backupStream = new FileOutputStream(backupFile);
-            this.progress = progress;
-            if (progress != null) {
-                LOG.debug("Set non-null progress callback on DFSOutputStream "+src);
-            }
+    /**
+     * Actually flush the accumulated bytes to the remote node,
+     * but no more bytes than the indicated number.
+     */
+    private synchronized void flushData(int maxPos) throws IOException {
+      int workingPos = Math.min(pos, maxPos);
+            
+      if (workingPos > 0) {
+        if ( backupStream == null ) {
+          throw new IOException( "Trying to write to backupStream " +
+                                 "but it already closed or not open");
         }
+        //
+        // To the local block backup, write just the bytes
+        //
+        backupStream.write(outBuf, 0, workingPos);
+
+        //
+        // Track position
+        //
+        bytesWrittenToBlock += workingPos;
+        System.arraycopy(outBuf, workingPos, outBuf, 0, pos - workingPos);
+        pos -= workingPos;
+      }
+    }
 
-        /* Wrapper for closing backupStream. This sets backupStream to null so
-         * that we do not attempt to write to backupStream that could be
-         * invalid in subsequent writes. Otherwise we might end trying to write
-         * filedescriptor that we don't own.
-         */
-        private void closeBackupStream() throws IOException {
-          if ( backupStream != null ) {
-            OutputStream stream = backupStream;
-            backupStream = null;
-            stream.close();
-          }   
-        }
-        /* Similar to closeBackupStream(). Theoritically deleting a file
-         * twice could result in deleting a file that we should not.
-         */
-        private void deleteBackupFile() {
-          if ( backupFile != null ) {
-            File file = backupFile;
-            backupFile = null;
-            file.delete();
+    /**
+     * We're done writing to the current block.
+     */
+    private synchronized void endBlock() throws IOException {
+      long sleeptime = 400;
+      //
+      // Done with local copy
+      //
+      closeBackupStream();
+
+      //
+      // Send it to datanode
+      //
+      boolean sentOk = false;
+      int remainingAttempts = 
+        conf.getInt("dfs.client.block.write.retries", 3);
+      while (!sentOk) {
+        nextBlockOutputStream();
+        InputStream in = new FileInputStream(backupFile);
+        try {
+          byte buf[] = new byte[BUFFER_SIZE];
+          int bytesRead = in.read(buf);
+          while (bytesRead > 0) {
+            blockStream.writeLong((long) bytesRead);
+            blockStream.write(buf, 0, bytesRead);
+            if (progress != null) { progress.progress(); }
+            bytesRead = in.read(buf);
           }
-        }
-        
-        private File newBackupFile() throws IOException {
-          File result = conf.getFile("dfs.client.buffer.dir",
-                                     "tmp"+File.separator+
-                                     "client-"+Math.abs(r.nextLong()));
-          result.deleteOnExit();
-          return result;
-        }
-
-        /**
-         * Open a DataOutputStream to a DataNode so that it can be written to.
-         * This happens when a file is created and each time a new block is allocated.
-         * Must get block ID and the IDs of the destinations from the namenode.
-         */
-        private synchronized void nextBlockOutputStream() throws IOException {
-            boolean retry = false;
-            long startTime = System.currentTimeMillis();
-            do {
-                retry = false;
-                
-                LocatedBlock lb;
-                if (firstTime) {
-                  lb = locateNewBlock();
-                } else {
-                  lb = locateFollowingBlock(startTime);
-                }
-
-                block = lb.getBlock();
-                if ( block.getNumBytes() < bytesWrittenToBlock ) {
-                  block.setNumBytes( bytesWrittenToBlock );
-                }
-                DatanodeInfo nodes[] = lb.getLocations();
-
-                //
-                // Connect to first DataNode in the list.  Abort if this fails.
-                //
-                InetSocketAddress target = DataNode.createSocketAddr(nodes[0].getName());
-                try {
-                    s = new Socket();
-                    s.connect(target, READ_TIMEOUT);
-                    s.setSoTimeout(replication * READ_TIMEOUT);
-                    datanodeName = nodes[0].getName();
-                } catch (IOException ie) {
-                    // Connection failed.  Let's wait a little bit and retry
-                    try {
-                        if (System.currentTimeMillis() - startTime > 5000) {
-                            LOG.info("Waiting to find target node: " + target);
-                        }
-                        Thread.sleep(6000);
-                    } catch (InterruptedException iex) {
-                    }
-                    if (firstTime) {
-                        namenode.abandonFileInProgress(src.toString(), 
-                                                       clientName);
-                    } else {
-                        namenode.abandonBlock(block, src.toString());
-                    }
-                    retry = true;
-                    continue;
-                }
-
-                //
-                // Xmit header info to datanode
-                //
-                DataOutputStream out = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
-                out.write(OP_WRITE_BLOCK);
-                out.writeBoolean(true);
-                block.write(out);
-                out.writeInt(nodes.length);
-                for (int i = 0; i < nodes.length; i++) {
-                    nodes[i].write(out);
-                }
-                out.write(CHUNKED_ENCODING);
-                blockStream = out;
-                blockReplyStream = new DataInputStream(new BufferedInputStream(s.getInputStream()));
-            } while (retry);
-            firstTime = false;
-        }
-
-        private LocatedBlock locateNewBlock() throws IOException {     
-          int retries = 3;
-          while (true) {
-            while (true) {
-              try {
-                return namenode.create(src.toString(), clientName.toString(),
-                                       overwrite, replication, blockSize);
-              } catch (RemoteException e) {
-                if (--retries == 0 || 
-                    !AlreadyBeingCreatedException.class.getName().
-                        equals(e.getClassName())) {
-                  throw e;
-                } else {
-                  // because failed tasks take upto LEASE_PERIOD to
-                  // release their pendingCreates files, if the file
-                  // we want to create is already being created, 
-                  // wait and try again.
-                  LOG.info(StringUtils.stringifyException(e));
-                  try {
-                    Thread.sleep(LEASE_SOFTLIMIT_PERIOD);
-                  } catch (InterruptedException ie) {
-                  }
-                }
-              }
-            }
+          internalClose();
+          sentOk = true;
+        } catch (IOException ie) {
+          handleSocketException(ie);
+          remainingAttempts -= 1;
+          if (remainingAttempts == 0) {
+            throw ie;
           }
+          try {
+            Thread.sleep(sleeptime);
+          } catch (InterruptedException e) {
+          }
+        } finally {
+          in.close();
         }
-        
-        private LocatedBlock locateFollowingBlock(long start
-                                                  ) throws IOException {     
-          int retries = 5;
-          long sleeptime = 400;
-          while (true) {
-            long localstart = System.currentTimeMillis();
-            while (true) {
-              try {
-                return namenode.addBlock(src.toString(), 
-                                         clientName.toString());
-              } catch (RemoteException e) {
-                if (--retries == 0 || 
-                    !NotReplicatedYetException.class.getName().
-                        equals(e.getClassName())) {
-                  throw e;
-                } else {
-                  LOG.info(StringUtils.stringifyException(e));
-                  if (System.currentTimeMillis() - localstart > 5000) {
-                    LOG.info("Waiting for replication for " + 
-                             (System.currentTimeMillis() - localstart)/1000 + 
-                             " seconds");
-                  }
-                  try {
-                    LOG.debug("NotReplicatedYetException sleeping " + src +
-                              " retries left " + retries);
-                    Thread.sleep(sleeptime);
-                  } catch (InterruptedException ie) {
-                  }
-                }                
-              }
-            }
-          } 
-        }
-
-        /**
-         * We're referring to the file pos here
-         */
-        public synchronized long getPos() throws IOException {
-            return filePos;
-        }
-			
-        /**
-         * Writes the specified byte to this output stream.
-         */
-        public synchronized void write(int b) throws IOException {
-            checkOpen();
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
-
-            if ((bytesWrittenToBlock + pos == blockSize) ||
-                (pos >= BUFFER_SIZE)) {
-                flush();
-            }
-            outBuf[pos++] = (byte) b;
-            filePos++;
-        }
-
-        /**
-         * Writes the specified bytes to this output stream.
-         */
-      public synchronized void write(byte b[], int off, int len)
-        throws IOException {
-            checkOpen();
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
-            while (len > 0) {
-              int remaining = Math.min(BUFFER_SIZE - pos,
-                  (int)((blockSize - bytesWrittenToBlock) - pos));
-              int toWrite = Math.min(remaining, len);
-              System.arraycopy(b, off, outBuf, pos, toWrite);
-              pos += toWrite;
-              off += toWrite;
-              len -= toWrite;
-              filePos += toWrite;
-
-              if ((bytesWrittenToBlock + pos >= blockSize) ||
-                  (pos == BUFFER_SIZE)) {
-                flush();
-              }
-            }
-        }
-
-        /**
-         * Flush the buffer, getting a stream to a new block if necessary.
-         */
-        public synchronized void flush() throws IOException {
-            checkOpen();
-            if (closed) {
-                throw new IOException("Stream closed");
-            }
+      }
 
-            if (bytesWrittenToBlock + pos >= blockSize) {
-                flushData((int) blockSize - bytesWrittenToBlock);
-            }
-            if (bytesWrittenToBlock == blockSize) {
-                endBlock();
-            }
-            flushData(pos);
-        }
+      bytesWrittenToBlock = 0;
+      //
+      // Delete local backup, start new one
+      //
+      deleteBackupFile();
+      File tmpFile = newBackupFile();
+      bytesWrittenToBlock = 0;
+      backupStream = new FileOutputStream(tmpFile);
+      backupFile = tmpFile;
+    }
 
-        /**
-         * Actually flush the accumulated bytes to the remote node,
-         * but no more bytes than the indicated number.
-         */
-        private synchronized void flushData(int maxPos) throws IOException {
-            int workingPos = Math.min(pos, maxPos);
-            
-            if (workingPos > 0) {
-                if ( backupStream == null ) {
-                    throw new IOException( "Trying to write to backupStream " +
-                                           "but it already closed or not open");
-                }
-                //
-                // To the local block backup, write just the bytes
-                //
-                backupStream.write(outBuf, 0, workingPos);
-
-                //
-                // Track position
-                //
-                bytesWrittenToBlock += workingPos;
-                System.arraycopy(outBuf, workingPos, outBuf, 0, pos - workingPos);
-                pos -= workingPos;
-            }
+    /**
+     * Close down stream to remote datanode.
+     */
+    private synchronized void internalClose() throws IOException {
+      try {
+        blockStream.writeLong(0);
+        blockStream.flush();
+
+        long complete = blockReplyStream.readLong();
+        if (complete != WRITE_COMPLETE) {
+          LOG.info("Did not receive WRITE_COMPLETE flag: " + complete);
+          throw new IOException("Did not receive WRITE_COMPLETE_FLAG: " + complete);
         }
+      } catch (IOException ie) {
+        throw (IOException)
+          new IOException("failure closing block of file " +
+                          src.toString() + " to node " +
+                          (datanodeName == null ? "?" : datanodeName)
+                          ).initCause(ie);
+      }
+                    
+      LocatedBlock lb = new LocatedBlock();
+      lb.readFields(blockReplyStream);
 
-        /**
-         * We're done writing to the current block.
-         */
-        private synchronized void endBlock() throws IOException {
-            long sleeptime = 400;
-            //
-            // Done with local copy
-            //
-            closeBackupStream();
-
-            //
-            // Send it to datanode
-            //
-            boolean sentOk = false;
-            int remainingAttempts = 
-               conf.getInt("dfs.client.block.write.retries", 3);
-            while (!sentOk) {
-                nextBlockOutputStream();
-                InputStream in = new FileInputStream(backupFile);
-                try {
-                    byte buf[] = new byte[BUFFER_SIZE];
-                    int bytesRead = in.read(buf);
-                    while (bytesRead > 0) {
-                        blockStream.writeLong((long) bytesRead);
-                        blockStream.write(buf, 0, bytesRead);
-                        if (progress != null) { progress.progress(); }
-                        bytesRead = in.read(buf);
-                    }
-                    internalClose();
-                    sentOk = true;
-                } catch (IOException ie) {
-                    handleSocketException(ie);
-                    remainingAttempts -= 1;
-                    if (remainingAttempts == 0) {
-                      throw ie;
-                    }
-                    try {
-                      Thread.sleep(sleeptime);
-                    } catch (InterruptedException e) {
-                    }
-                } finally {
-                  in.close();
-                }
-            }
+      s.close();
+      s = null;
+    }
 
-            bytesWrittenToBlock = 0;
-            //
-            // Delete local backup, start new one
-            //
-            deleteBackupFile();
-            File tmpFile = newBackupFile();
-            bytesWrittenToBlock = 0;
-            backupStream = new FileOutputStream(tmpFile);
-            backupFile = tmpFile;
+    private void handleSocketException(IOException ie) throws IOException {
+      LOG.warn("Error while writing.", ie);
+      try {
+        if (s != null) {
+          s.close();
+          s = null;
         }
+      } catch (IOException ie2) {
+        LOG.warn("Error closing socket.", ie2);
+      }
+      namenode.abandonBlock(block, src.toString());
+    }
 
-        /**
-         * Close down stream to remote datanode.
-         */
-        private synchronized void internalClose() throws IOException {
+    /**
+     * Closes this output stream and releases any system 
+     * resources associated with this stream.
+     */
+    public synchronized void close() throws IOException {
+      checkOpen();
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+          
+      try {
+        flush();
+        if (filePos == 0 || bytesWrittenToBlock != 0) {
           try {
-            blockStream.writeLong(0);
-            blockStream.flush();
-
-            long complete = blockReplyStream.readLong();
-            if (complete != WRITE_COMPLETE) {
-                LOG.info("Did not receive WRITE_COMPLETE flag: " + complete);
-                throw new IOException("Did not receive WRITE_COMPLETE_FLAG: " + complete);
-            }
-          } catch (IOException ie) {
-            throw (IOException)
-                  new IOException("failure closing block of file " +
-                                  src.toString() + " to node " +
-                                  (datanodeName == null ? "?" : datanodeName)
-                                 ).initCause(ie);
+            endBlock();
+          } catch (IOException e) {
+            namenode.abandonFileInProgress(src.toString(), clientName);
+            throw e;
           }
-                    
-            LocatedBlock lb = new LocatedBlock();
-            lb.readFields(blockReplyStream);
-
-            s.close();
-            s = null;
         }
+            
+        closeBackupStream();
+        deleteBackupFile();
 
-        private void handleSocketException(IOException ie) throws IOException {
-          LOG.warn("Error while writing.", ie);
-          try {
-            if (s != null) {
-              s.close();
-              s = null;
-            }
-          } catch (IOException ie2) {
-            LOG.warn("Error closing socket.", ie2);
-          }
-          namenode.abandonBlock(block, src.toString());
+        if (s != null) {
+          s.close();
+          s = null;
         }
+        super.close();
 
-        /**
-         * Closes this output stream and releases any system 
-         * resources associated with this stream.
-         */
-        public synchronized void close() throws IOException {
-          checkOpen();
-          if (closed) {
-              throw new IOException("Stream closed");
-          }
-          
-          try {
-            flush();
-            if (filePos == 0 || bytesWrittenToBlock != 0) {
-              try {
-                endBlock();
-              } catch (IOException e) {
-                namenode.abandonFileInProgress(src.toString(), clientName);
-                throw e;
-              }
-            }
-            
-            closeBackupStream();
-            deleteBackupFile();
-
-            if (s != null) {
-                s.close();
-                s = null;
-            }
-            super.close();
-
-            long localstart = System.currentTimeMillis();
-            boolean fileComplete = false;
-            while (! fileComplete) {
-              fileComplete = namenode.complete(src.toString(), clientName.toString());
-              if (!fileComplete) {
-                try {
-                  Thread.sleep(400);
-                  if (System.currentTimeMillis() - localstart > 5000) {
-                    LOG.info("Could not complete file, retrying...");
-                  }
-                } catch (InterruptedException ie) {
-                }
+        long localstart = System.currentTimeMillis();
+        boolean fileComplete = false;
+        while (! fileComplete) {
+          fileComplete = namenode.complete(src.toString(), clientName.toString());
+          if (!fileComplete) {
+            try {
+              Thread.sleep(400);
+              if (System.currentTimeMillis() - localstart > 5000) {
+                LOG.info("Could not complete file, retrying...");
               }
-            }
-            closed = true;
-          } finally {
-            synchronized (pendingCreates) {
-              pendingCreates.remove(src.toString());
+            } catch (InterruptedException ie) {
             }
           }
         }
+        closed = true;
+      } finally {
+        synchronized (pendingCreates) {
+          pendingCreates.remove(src.toString());
+        }
+      }
     }
+  }
 }
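
The endBlock()/internalClose() pair above ships the locally buffered block to a datanode as length-prefixed chunks and terminates the stream with a zero length (the datanode then answers with WRITE_COMPLETE and a LocatedBlock). A minimal, self-contained sketch of just that framing, with illustrative names and buffer size and without the reply handling, could look like this:

import java.io.*;

class ChunkFramingSketch {
  // Writer side: mirrors the shape of endBlock(), which replays the local
  // backup file to the datanode as length-prefixed chunks.
  static void writeChunked(InputStream src, DataOutputStream out) throws IOException {
    byte[] buf = new byte[4096];          // buffer size is illustrative
    int n = src.read(buf);
    while (n > 0) {
      out.writeLong(n);                   // chunk length prefix
      out.write(buf, 0, n);               // chunk payload
      n = src.read(buf);
    }
    out.writeLong(0);                     // zero length ends the block, as in internalClose()
    out.flush();
  }

  // Reader side: consumes chunks until the zero-length terminator.
  static long readChunked(DataInputStream in, OutputStream dst) throws IOException {
    long total = 0;
    for (long len = in.readLong(); len > 0; len = in.readLong()) {
      byte[] buf = new byte[(int) len];
      in.readFully(buf);
      dst.write(buf);
      total += len;
    }
    return total;
  }
}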

+ 105 - 105
src/java/org/apache/hadoop/dfs/DFSFileInfo.java

@@ -29,111 +29,111 @@ import java.io.*;
  * @author Mike Cafarella
  ******************************************************/
 class DFSFileInfo implements Writable {
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (DFSFileInfo.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new DFSFileInfo(); }
-         });
-    }
-
-    UTF8 path;
-    long len;
-    long contentsLen;
-    boolean isDir;
-    short blockReplication;
-    long blockSize;
-
-    /**
-     */
-    public DFSFileInfo() {
-    }
-
-    /**
-     * Create DFSFileInfo by file INode 
-     */
-    public DFSFileInfo( FSDirectory.INode node ) {
-        this.path = new UTF8(node.computeName());
-        this.isDir = node.isDir();
-        if( isDir ) {
-          this.len = 0;
-          this.contentsLen = node.computeContentsLength();
-        } else 
-          this.len = this.contentsLen = node.computeFileLength();
-        this.blockReplication = node.getReplication();
-        blockSize = node.getBlockSize();
-    }
-
-    /**
-     */
-    public String getPath() {
-        return path.toString();
-    }
-
-    /**
-     */
-    public String getName() {
-        return new Path(path.toString()).getName();
-    }
-
-    /**
-     */
-    public String getParent() {
-      return new Path(path.toString()).getParent().toString();
-    }
-
-    /**
-     */
-    public long getLen() {
-        return len;
-    }
-
-    /**
-     */
-    public long getContentsLen() {
-        return contentsLen;
-    }
-
-    /**
-     */
-    public boolean isDir() {
-        return isDir;
-    }
-
-    /**
-     */
-    public short getReplication() {
-      return this.blockReplication;
-    }
-
-    /**
-     * Get the block size of the file.
-     * @return the number of bytes
-     */
-    public long getBlockSize() {
-      return blockSize;
-    }
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (DFSFileInfo.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new DFSFileInfo(); }
+       });
+  }
+
+  UTF8 path;
+  long len;
+  long contentsLen;
+  boolean isDir;
+  short blockReplication;
+  long blockSize;
+
+  /**
+   */
+  public DFSFileInfo() {
+  }
+
+  /**
+   * Create DFSFileInfo by file INode 
+   */
+  public DFSFileInfo( FSDirectory.INode node ) {
+    this.path = new UTF8(node.computeName());
+    this.isDir = node.isDir();
+    if( isDir ) {
+      this.len = 0;
+      this.contentsLen = node.computeContentsLength();
+    } else 
+      this.len = this.contentsLen = node.computeFileLength();
+    this.blockReplication = node.getReplication();
+    blockSize = node.getBlockSize();
+  }
+
+  /**
+   */
+  public String getPath() {
+    return path.toString();
+  }
+
+  /**
+   */
+  public String getName() {
+    return new Path(path.toString()).getName();
+  }
+
+  /**
+   */
+  public String getParent() {
+    return new Path(path.toString()).getParent().toString();
+  }
+
+  /**
+   */
+  public long getLen() {
+    return len;
+  }
+
+  /**
+   */
+  public long getContentsLen() {
+    return contentsLen;
+  }
+
+  /**
+   */
+  public boolean isDir() {
+    return isDir;
+  }
+
+  /**
+   */
+  public short getReplication() {
+    return this.blockReplication;
+  }
+
+  /**
+   * Get the block size of the file.
+   * @return the number of bytes
+   */
+  public long getBlockSize() {
+    return blockSize;
+  }
     
-    //////////////////////////////////////////////////
-    // Writable
-    //////////////////////////////////////////////////
-    public void write(DataOutput out) throws IOException {
-        path.write(out);
-        out.writeLong(len);
-        out.writeLong(contentsLen);
-        out.writeBoolean(isDir);
-        out.writeShort(blockReplication);
-        out.writeLong(blockSize);
-    }
-
-    public void readFields(DataInput in) throws IOException {
-        this.path = new UTF8();
-        this.path.readFields(in);
-        this.len = in.readLong();
-        this.contentsLen = in.readLong();
-        this.isDir = in.readBoolean();
-        this.blockReplication = in.readShort();
-        blockSize = in.readLong();
-    }
+  //////////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    path.write(out);
+    out.writeLong(len);
+    out.writeLong(contentsLen);
+    out.writeBoolean(isDir);
+    out.writeShort(blockReplication);
+    out.writeLong(blockSize);
+  }
+
+  public void readFields(DataInput in) throws IOException {
+    this.path = new UTF8();
+    this.path.readFields(in);
+    this.len = in.readLong();
+    this.contentsLen = in.readLong();
+    this.isDir = in.readBoolean();
+    this.blockReplication = in.readShort();
+    blockSize = in.readLong();
+  }
 }
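
DFSFileInfo's write()/readFields() above follow the usual Hadoop Writable contract: fields go out to a DataOutput in a fixed order and are read back in the same order. A minimal round-trip sketch of that contract, using a hypothetical record with the same field layout (not the real class, whose constructor is tied to FSDirectory.INode and which writes the path as a UTF8):

import java.io.*;

class FileInfoSketch {
  String path; long len; long contentsLen; boolean isDir;
  short blockReplication; long blockSize;

  void write(DataOutput out) throws IOException {
    out.writeUTF(path);                  // stand-in for UTF8.write(out)
    out.writeLong(len);
    out.writeLong(contentsLen);
    out.writeBoolean(isDir);
    out.writeShort(blockReplication);
    out.writeLong(blockSize);
  }

  void readFields(DataInput in) throws IOException {
    path = in.readUTF();
    len = in.readLong();
    contentsLen = in.readLong();
    isDir = in.readBoolean();
    blockReplication = in.readShort();
    blockSize = in.readLong();
  }

  public static void main(String[] args) throws IOException {
    FileInfoSketch a = new FileInfoSketch();
    a.path = "/user/test/file"; a.len = 42; a.contentsLen = 42;
    a.isDir = false; a.blockReplication = 3; a.blockSize = 64L << 20;

    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    a.write(new DataOutputStream(bytes));

    FileInfoSketch b = new FileInfoSketch();
    b.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
    System.out.println(b.path + " " + b.len + " " + b.blockSize);
  }
}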
 

Diff between files not shown because it is too large
+ 703 - 664
src/java/org/apache/hadoop/dfs/DataNode.java


+ 11 - 11
src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java

@@ -94,9 +94,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor( DatanodeID nodeID, 
-                      long capacity, 
-                      long remaining,
-                      int xceiverCount ) {
+                             long capacity, 
+                             long remaining,
+                             int xceiverCount ) {
     super( nodeID );
     updateHeartbeat(capacity, remaining, xceiverCount);
     initWorkLists();
@@ -111,11 +111,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param xceiverCount # of data transfers at the data node
    */
   public DatanodeDescriptor( DatanodeID nodeID,
-                              String networkLocation,
-                              String hostName,
-                              long capacity, 
-                              long remaining,
-                              int xceiverCount ) {
+                             String networkLocation,
+                             String hostName,
+                             long capacity, 
+                             long remaining,
+                             int xceiverCount ) {
     super( nodeID, networkLocation, hostName );
     updateHeartbeat( capacity, remaining, xceiverCount);
     initWorkLists();
@@ -133,11 +133,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    */
   void addBlock(Block b) {
-      blocks.put(b, b);
+    blocks.put(b, b);
   }
   
   void removeBlock(Block b) {
-      blocks.remove(b);
+    blocks.remove(b);
   }
 
   void resetBlocks() {
@@ -240,7 +240,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
       int numBlocks = 0;
       int i;
       for (i = 0; i < replicateTargetSets.size() && 
-           numTransfers < maxNumTransfers; i++) {
+             numTransfers < maxNumTransfers; i++) {
         numTransfers += replicateTargetSets.get(i).length;
       }
       numBlocks = i;

+ 46 - 46
src/java/org/apache/hadoop/dfs/DatanodeInfo.java

@@ -75,18 +75,18 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   DatanodeInfo( DatanodeID nodeID ) {
-      super( nodeID );
-      this.capacity = 0L;
-      this.remaining = 0L;
-      this.lastUpdate = 0L;
-      this.xceiverCount = 0;
-      this.adminState = null;    
+    super( nodeID );
+    this.capacity = 0L;
+    this.remaining = 0L;
+    this.lastUpdate = 0L;
+    this.xceiverCount = 0;
+    this.adminState = null;    
   }
   
   DatanodeInfo( DatanodeID nodeID, String location, String hostName ) {
-      this(nodeID);
-      this.location = location;
-      this.hostName = hostName;
+    this(nodeID);
+    this.location = location;
+    this.hostName = hostName;
   }
   
   /** The raw capacity. */
@@ -130,7 +130,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
   
   public String getPath() {
-      return location+NodeBase.PATH_SEPARATOR_STR+name;
+    return location+NodeBase.PATH_SEPARATOR_STR+name;
   }
 
   
@@ -150,7 +150,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long u = c - r;
     buffer.append("Name: "+name+"\n");
     if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
-        buffer.append("Rack: "+location+"\n");
+      buffer.append("Rack: "+location+"\n");
     }
     if (isDecommissioned()) {
       buffer.append("State          : Decommissioned\n");
@@ -174,7 +174,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long u = c - r;
     buffer.append(name);
     if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
-        buffer.append(" "+location);
+      buffer.append(" "+location);
     }
     if (isDecommissioned()) {
       buffer.append(" DD");
@@ -209,51 +209,51 @@ public class DatanodeInfo extends DatanodeID implements Node {
   /**
    * Returns true if the node is in the process of being decommissioned
    */
-   boolean isDecommissionInProgress() {
-     if (adminState == AdminStates.DECOMMISSION_INPROGRESS) {
-       return true;
-     }
-     return false;
-   }
+  boolean isDecommissionInProgress() {
+    if (adminState == AdminStates.DECOMMISSION_INPROGRESS) {
+      return true;
+    }
+    return false;
+  }
 
   /**
    * Returns true if the node has been decommissioned.
    */
-   boolean isDecommissioned() {
-     if (adminState == AdminStates.DECOMMISSIONED) {
-       return true;
-     }
-     return false;
-   }
+  boolean isDecommissioned() {
+    if (adminState == AdminStates.DECOMMISSIONED) {
+      return true;
+    }
+    return false;
+  }
 
   /**
    * Sets the admin state to indicate that decommision is complete.
    */
-   void setDecommissioned() {
-     adminState = AdminStates.DECOMMISSIONED;
-   }
+  void setDecommissioned() {
+    adminState = AdminStates.DECOMMISSIONED;
+  }
 
-   /**
-    * Retrieves the admin state of this node.
-    */
-    AdminStates getAdminState() {
-      if (adminState == null) {
-        return AdminStates.NORMAL;
-      }
-      return adminState;
+  /**
+   * Retrieves the admin state of this node.
+   */
+  AdminStates getAdminState() {
+    if (adminState == null) {
+      return AdminStates.NORMAL;
     }
+    return adminState;
+  }
 
-   /**
-    * Sets the admin state of this node.
-    */
-    void setAdminState(AdminStates newState) {
-      if (newState == AdminStates.NORMAL) {
-        adminState = null;
-      }
-      else {
-        adminState = newState;
-      }
+  /**
+   * Sets the admin state of this node.
+   */
+  void setAdminState(AdminStates newState) {
+    if (newState == AdminStates.NORMAL) {
+      adminState = null;
+    }
+    else {
+      adminState = newState;
     }
+  }
 
   private int level; //which level of the tree the node resides
   private Node parent; //its parent
@@ -301,7 +301,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.xceiverCount = in.readInt();
     this.location = Text.readString( in );
     AdminStates newState = (AdminStates) WritableUtils.readEnum(in,
-                                         AdminStates.class);
+                                                                AdminStates.class);
     setAdminState(newState);
   }
 }
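
The admin-state accessors above rely on a small convention: a null adminState field stands for NORMAL, so setAdminState(NORMAL) stores null and getAdminState() maps null back to NORMAL. A compact sketch of that convention, with an illustrative wrapper class:

class AdminStateSketch {
  enum AdminStates { NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED }

  private AdminStates adminState;        // null means NORMAL

  AdminStates getAdminState() {
    return adminState == null ? AdminStates.NORMAL : adminState;
  }

  void setAdminState(AdminStates newState) {
    adminState = (newState == AdminStates.NORMAL) ? null : newState;
  }

  boolean isDecommissioned() {
    return getAdminState() == AdminStates.DECOMMISSIONED;
  }
}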

+ 38 - 38
src/java/org/apache/hadoop/dfs/DatanodeProtocol.java

@@ -64,47 +64,47 @@ interface DatanodeProtocol extends VersionedProtocol {
    * new storageID if the datanode did not have one and
    * registration ID for further communication.
    */
-    public DatanodeRegistration register( DatanodeRegistration registration,
-                                          String networkLocation
+  public DatanodeRegistration register( DatanodeRegistration registration,
+                                        String networkLocation
                                         ) throws IOException;
-    /**
-     * sendHeartbeat() tells the NameNode that the DataNode is still
-     * alive and well.  Includes some status info, too. 
-     * It also gives the NameNode a chance to return a "DatanodeCommand" object.
-     * A DatanodeCommand tells the DataNode to invalidate local block(s), 
-     * or to copy them to other DataNodes, etc.
-     */
-    public DatanodeCommand sendHeartbeat( DatanodeRegistration registration,
-                                          long capacity, long remaining,
-                                          int xmitsInProgress,
-                                          int xceiverCount) throws IOException;
+  /**
+   * sendHeartbeat() tells the NameNode that the DataNode is still
+   * alive and well.  Includes some status info, too. 
+   * It also gives the NameNode a chance to return a "DatanodeCommand" object.
+   * A DatanodeCommand tells the DataNode to invalidate local block(s), 
+   * or to copy them to other DataNodes, etc.
+   */
+  public DatanodeCommand sendHeartbeat( DatanodeRegistration registration,
+                                        long capacity, long remaining,
+                                        int xmitsInProgress,
+                                        int xceiverCount) throws IOException;
 
-    /**
-     * blockReport() tells the NameNode about all the locally-stored blocks.
-     * The NameNode returns an array of Blocks that have become obsolete
-     * and should be deleted.  This function is meant to upload *all*
-     * the locally-stored blocks.  It's invoked upon startup and then
-     * infrequently afterwards.
-     */
-    public DatanodeCommand blockReport( DatanodeRegistration registration,
-                                        Block blocks[]) throws IOException;
+  /**
+   * blockReport() tells the NameNode about all the locally-stored blocks.
+   * The NameNode returns an array of Blocks that have become obsolete
+   * and should be deleted.  This function is meant to upload *all*
+   * the locally-stored blocks.  It's invoked upon startup and then
+   * infrequently afterwards.
+   */
+  public DatanodeCommand blockReport( DatanodeRegistration registration,
+                                      Block blocks[]) throws IOException;
     
-    /**
-     * blockReceived() allows the DataNode to tell the NameNode about
-     * recently-received block data.  For example, whenever client code
-     * writes a new Block here, or another DataNode copies a Block to
-     * this DataNode, it will call blockReceived().
-     */
-    public void blockReceived(DatanodeRegistration registration,
-                              Block blocks[]) throws IOException;
+  /**
+   * blockReceived() allows the DataNode to tell the NameNode about
+   * recently-received block data.  For example, whenever client code
+   * writes a new Block here, or another DataNode copies a Block to
+   * this DataNode, it will call blockReceived().
+   */
+  public void blockReceived(DatanodeRegistration registration,
+                            Block blocks[]) throws IOException;
 
-    /**
-     * errorReport() tells the NameNode about something that has gone
-     * awry.  Useful for debugging.
-     */
-    public void errorReport(DatanodeRegistration registration,
-                            int errorCode, 
-                            String msg) throws IOException;
+  /**
+   * errorReport() tells the NameNode about something that has gone
+   * awry.  Useful for debugging.
+   */
+  public void errorReport(DatanodeRegistration registration,
+                          int errorCode, 
+                          String msg) throws IOException;
     
-    public NamespaceInfo versionRequest() throws IOException;
+  public NamespaceInfo versionRequest() throws IOException;
 }

+ 23 - 23
src/java/org/apache/hadoop/dfs/DfsPath.java

@@ -26,29 +26,29 @@ import org.apache.hadoop.fs.Path;
  * a file with the names in a directory listing to make accesses faster.
  */
 class DfsPath extends Path {
-    DFSFileInfo info;
+  DFSFileInfo info;
 
-    public DfsPath(DFSFileInfo info) {
-        super(info.getPath());
-        this.info = info;
-    }
+  public DfsPath(DFSFileInfo info) {
+    super(info.getPath());
+    this.info = info;
+  }
 
-    public boolean isDirectory() {
-        return info.isDir();
-    }
-    public boolean isFile() {
-        return ! isDirectory();
-    }
-    public long length() {
-        return info.getLen();
-    }
-    public long getContentsLength() {
-        return info.getContentsLen();
-    }
-    public short getReplication() {
-      return info.getReplication();
-    }
-    public long getBlockSize() {
-      return info.getBlockSize();
-    }
+  public boolean isDirectory() {
+    return info.isDir();
+  }
+  public boolean isFile() {
+    return ! isDirectory();
+  }
+  public long length() {
+    return info.getLen();
+  }
+  public long getContentsLength() {
+    return info.getContentsLen();
+  }
+  public short getReplication() {
+    return info.getReplication();
+  }
+  public long getBlockSize() {
+    return info.getBlockSize();
+  }
 }

+ 121 - 121
src/java/org/apache/hadoop/dfs/DistributedFileSystem.java

@@ -34,9 +34,9 @@ import org.apache.hadoop.util.*;
  * @author Mike Cafarella
  *****************************************************************/
 public class DistributedFileSystem extends ChecksumFileSystem {
-    private static class RawDistributedFileSystem extends FileSystem {
+  private static class RawDistributedFileSystem extends FileSystem {
     private Path workingDir =
-        new Path("/user", System.getProperty("user.name")); 
+      new Path("/user", System.getProperty("user.name")); 
     private URI uri;
     private FileSystem localFs;
 
@@ -48,7 +48,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
 
     /** @deprecated */
     public RawDistributedFileSystem(InetSocketAddress namenode,
-                                 Configuration conf) throws IOException {
+                                    Configuration conf) throws IOException {
       initialize(URI.create("hdfs://"+
                             namenode.getHostName()+":"+
                             namenode.getPort()),
@@ -111,8 +111,8 @@ public class DistributedFileSystem extends ChecksumFileSystem {
       String result = makeAbsolute(file).toUri().getPath();
       if (!FSNamesystem.isValidName(result)) {
         throw new IllegalArgumentException("Pathname " + result + " from " +
-                                            file +
-                                            " is not a valid DFS filename.");
+                                           file +
+                                           " is not a valid DFS filename.");
       }
       return new UTF8(result);
     }
@@ -130,10 +130,10 @@ public class DistributedFileSystem extends ChecksumFileSystem {
     }
 
     public FSDataOutputStream create(Path f, boolean overwrite,
-            int bufferSize, short replication, long blockSize,
-            Progressable progress) throws IOException {
+                                     int bufferSize, short replication, long blockSize,
+                                     Progressable progress) throws IOException {
       if (exists(f) && ! overwrite) {
-         throw new IOException("File already exists:"+f);
+        throw new IOException("File already exists:"+f);
       }
       Path parent = f.getParent();
       if (parent != null && !mkdirs(parent)) {
@@ -141,14 +141,14 @@ public class DistributedFileSystem extends ChecksumFileSystem {
       }
       
       return new FSDataOutputStream(
-           dfs.create(getPath(f), overwrite,
-                   replication, blockSize, progress),
-           bufferSize);
+                                    dfs.create(getPath(f), overwrite,
+                                               replication, blockSize, progress),
+                                    bufferSize);
     }
     
     public boolean setReplication( Path src, 
-                                      short replication
-                                    ) throws IOException {
+                                   short replication
+                                   ) throws IOException {
       return dfs.setReplication(getPath(src), replication);
     }
     
@@ -163,36 +163,36 @@ public class DistributedFileSystem extends ChecksumFileSystem {
      * Get rid of Path f, whether a true file or dir.
      */
     public boolean delete(Path f) throws IOException {
-        return dfs.delete(getPath(f));
+      return dfs.delete(getPath(f));
     }
 
     public boolean exists(Path f) throws IOException {
-        return dfs.exists(getPath(f));
+      return dfs.exists(getPath(f));
     }
 
     public boolean isDirectory(Path f) throws IOException {
-        if (f instanceof DfsPath) {
-          return ((DfsPath)f).isDirectory();
-        }
-        return dfs.isDirectory(getPath(f));
+      if (f instanceof DfsPath) {
+        return ((DfsPath)f).isDirectory();
+      }
+      return dfs.isDirectory(getPath(f));
     }
 
     public long getLength(Path f) throws IOException {
-        if (f instanceof DfsPath) {
-          return ((DfsPath)f).length();
-        }
+      if (f instanceof DfsPath) {
+        return ((DfsPath)f).length();
+      }
 
-        DFSFileInfo info[] = dfs.listPaths(getPath(f));
-        return (info == null) ? 0 : info[0].getLen();
+      DFSFileInfo info[] = dfs.listPaths(getPath(f));
+      return (info == null) ? 0 : info[0].getLen();
     }
 
     public long getContentLength(Path f) throws IOException {
-        if (f instanceof DfsPath) {
-            return ((DfsPath)f).getContentsLength();
-          }
+      if (f instanceof DfsPath) {
+        return ((DfsPath)f).getContentsLength();
+      }
 
-          DFSFileInfo info[] = dfs.listPaths(getPath(f));
-          return (info == null) ? 0 : info[0].getContentsLen();
+      DFSFileInfo info[] = dfs.listPaths(getPath(f));
+      return (info == null) ? 0 : info[0].getContentsLen();
     }
 
     public short getReplication(Path f) throws IOException {
@@ -202,44 +202,44 @@ public class DistributedFileSystem extends ChecksumFileSystem {
 
       DFSFileInfo info[] = dfs.listPaths(getPath(f));
       return info[0].getReplication();
-  }
+    }
 
     public Path[] listPaths(Path f) throws IOException {
-        DFSFileInfo info[] = dfs.listPaths(getPath(f));
-        if (info == null) {
-            return new Path[0];
-        } else {
-            Path results[] = new DfsPath[info.length];
-            for (int i = 0; i < info.length; i++) {
-                results[i] = new DfsPath(info[i]);
-            }
-            return results;
+      DFSFileInfo info[] = dfs.listPaths(getPath(f));
+      if (info == null) {
+        return new Path[0];
+      } else {
+        Path results[] = new DfsPath[info.length];
+        for (int i = 0; i < info.length; i++) {
+          results[i] = new DfsPath(info[i]);
         }
+        return results;
+      }
     }
 
     public boolean mkdirs(Path f) throws IOException {
-        return dfs.mkdirs(getPath(f));
+      return dfs.mkdirs(getPath(f));
     }
 
     /** @deprecated */ @Deprecated
-    public void lock(Path f, boolean shared) throws IOException {
-        dfs.lock(getPath(f), ! shared);
+      public void lock(Path f, boolean shared) throws IOException {
+      dfs.lock(getPath(f), ! shared);
     }
 
     /** @deprecated */ @Deprecated
-    public void release(Path f) throws IOException {
-        dfs.release(getPath(f));
+      public void release(Path f) throws IOException {
+      dfs.release(getPath(f));
     }
 
     @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
-    throws IOException {
+      public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+      throws IOException {
       FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
     }
 
     @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
-    throws IOException {
+      public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+      throws IOException {
       FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
     }
 
@@ -257,27 +257,27 @@ public class DistributedFileSystem extends ChecksumFileSystem {
     }
 
     public void close() throws IOException {
-        super.close();
-        dfs.close();
+      super.close();
+      dfs.close();
     }
 
     public String toString() {
-        return "DFS[" + dfs + "]";
+      return "DFS[" + dfs + "]";
     }
 
     DFSClient getClient() {
-        return dfs;
+      return dfs;
     }        
     /** Return the total raw capacity of the filesystem, disregarding
      * replication .*/
     public long getRawCapacity() throws IOException{
-        return dfs.totalRawCapacity();
+      return dfs.totalRawCapacity();
     }
 
     /** Return the total raw used space in the filesystem, disregarding
      * replication .*/
     public long getRawUsed() throws IOException{
-        return dfs.totalRawUsed();
+      return dfs.totalRawUsed();
     }
 
     /** Return statistics for each datanode. */
@@ -291,7 +291,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
      * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
      */
     public boolean setSafeMode( FSConstants.SafeModeAction action ) 
-    throws IOException {
+      throws IOException {
       return dfs.setSafeMode( action );
     }
 
@@ -325,8 +325,8 @@ public class DistributedFileSystem extends ChecksumFileSystem {
      * we can consider figuring out exactly which block is corrupt.
      */
     public boolean reportChecksumFailure(Path f, 
-                                      FSDataInputStream in, long inPos, 
-                                      FSDataInputStream sums, long sumsPos) {
+                                         FSDataInputStream in, long inPos, 
+                                         FSDataInputStream sums, long sumsPos) {
       
       LocatedBlock lblocks[] = new LocatedBlock[2];
 
@@ -365,81 +365,81 @@ public class DistributedFileSystem extends ChecksumFileSystem {
 
       return true;
     }
-    }
+  }
 
-    public DistributedFileSystem() {
-        super( new RawDistributedFileSystem() );
-    }
+  public DistributedFileSystem() {
+    super( new RawDistributedFileSystem() );
+  }
 
-    /** @deprecated */
-    public DistributedFileSystem(InetSocketAddress namenode,
-                                 Configuration conf) throws IOException {
-      super( new RawDistributedFileSystem(namenode, conf) );
-    }
+  /** @deprecated */
+  public DistributedFileSystem(InetSocketAddress namenode,
+                               Configuration conf) throws IOException {
+    super( new RawDistributedFileSystem(namenode, conf) );
+  }
 
-    @Override
+  @Override
     public long getContentLength(Path f) throws IOException {
-      return fs.getContentLength(f);
-    }
+    return fs.getContentLength(f);
+  }
 
-    /** Return the total raw capacity of the filesystem, disregarding
-     * replication .*/
-    public long getRawCapacity() throws IOException{
-        return ((RawDistributedFileSystem)fs).getRawCapacity();
-    }
+  /** Return the total raw capacity of the filesystem, disregarding
+   * replication .*/
+  public long getRawCapacity() throws IOException{
+    return ((RawDistributedFileSystem)fs).getRawCapacity();
+  }
 
-    /** Return the total raw used space in the filesystem, disregarding
-     * replication .*/
-    public long getRawUsed() throws IOException{
-        return ((RawDistributedFileSystem)fs).getRawUsed();
-    }
+  /** Return the total raw used space in the filesystem, disregarding
+   * replication .*/
+  public long getRawUsed() throws IOException{
+    return ((RawDistributedFileSystem)fs).getRawUsed();
+  }
 
-    /** Return statistics for each datanode. */
-    public DatanodeInfo[] getDataNodeStats() throws IOException {
-      return ((RawDistributedFileSystem)fs).getDataNodeStats();
-    }
+  /** Return statistics for each datanode. */
+  public DatanodeInfo[] getDataNodeStats() throws IOException {
+    return ((RawDistributedFileSystem)fs).getDataNodeStats();
+  }
     
-    /**
-     * Enter, leave or get safe mode.
-     *  
-     * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
-     */
-    public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+  /**
+   * Enter, leave or get safe mode.
+   *  
+   * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
+   */
+  public boolean setSafeMode( FSConstants.SafeModeAction action ) 
     throws IOException {
-      return ((RawDistributedFileSystem)fs).setSafeMode( action );
-    }
+    return ((RawDistributedFileSystem)fs).setSafeMode( action );
+  }
 
-    /*
-     * Refreshes the list of hosts and excluded hosts from the configured 
-     * files.  
-     */
-    public void refreshNodes() throws IOException {
-      ((RawDistributedFileSystem)fs).refreshNodes();
-    }
+  /*
+   * Refreshes the list of hosts and excluded hosts from the configured 
+   * files.  
+   */
+  public void refreshNodes() throws IOException {
+    ((RawDistributedFileSystem)fs).refreshNodes();
+  }
 
-    /**
-     * Finalize previously upgraded file system state.
-     */
-    public void finalizeUpgrade() throws IOException {
-      ((RawDistributedFileSystem)fs).finalizeUpgrade();
-    }
+  /**
+   * Finalize previously upgraded file system state.
+   */
+  public void finalizeUpgrade() throws IOException {
+    ((RawDistributedFileSystem)fs).finalizeUpgrade();
+  }
 
-    /*
-     * Dumps dfs data structures into specified file.
-     */
-     public void metaSave(String pathname) throws IOException {
-       ((RawDistributedFileSystem)fs).metaSave(pathname);
-     }
+  /*
+   * Dumps dfs data structures into specified file.
+   */
+  public void metaSave(String pathname) throws IOException {
+    ((RawDistributedFileSystem)fs).metaSave(pathname);
+  }
 
-    /**
-     * We need to find the blocks that didn't match.  Likely only one 
-     * is corrupt but we will report both to the namenode.  In the future,
-     * we can consider figuring out exactly which block is corrupt.
-     */
-    public boolean reportChecksumFailure(Path f, 
-                                      FSDataInputStream in, long inPos, 
-                                      FSDataInputStream sums, long sumsPos) {
-      return ((RawDistributedFileSystem)fs).reportChecksumFailure(
-                f, in, inPos, sums, sumsPos);
-    }
+  /**
+   * We need to find the blocks that didn't match.  Likely only one 
+   * is corrupt but we will report both to the namenode.  In the future,
+   * we can consider figuring out exactly which block is corrupt.
+   */
+  public boolean reportChecksumFailure(Path f, 
+                                       FSDataInputStream in, long inPos, 
+                                       FSDataInputStream sums, long sumsPos) {
+    return ((RawDistributedFileSystem)fs).reportChecksumFailure(
+                                                                f, in, inPos, sums, sumsPos);
+  }
 }
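
The methods above are thin delegations: the public wrapper keeps the raw filesystem in the inherited fs field and forwards each administrative call after a cast to RawDistributedFileSystem. A minimal, self-contained sketch of that forwarding pattern follows; the names RawStore, LocalRawStore and AdminFacade are hypothetical and not part of Hadoop.

// Hypothetical, self-contained sketch of the forwarding pattern used above.
// None of these names exist in Hadoop; AdminFacade stands in for the wrapper
// and RawStore for the raw filesystem implementation it forwards to.
interface RawStore {
  long rawCapacity();
  long rawUsed();
}

class LocalRawStore implements RawStore {
  public long rawCapacity() { return 1000000L; }  // pretend 1 MB of raw capacity
  public long rawUsed()     { return 250000L;  }  // pretend 250 KB already used
}

class AdminFacade {
  private final RawStore fs;                      // analogous to the inherited 'fs' field

  AdminFacade(RawStore fs) { this.fs = fs; }

  // Thin delegations, mirroring getRawCapacity()/getRawUsed() in the diff above.
  long getRawCapacity() { return fs.rawCapacity(); }
  long getRawUsed()     { return fs.rawUsed(); }

  public static void main(String[] args) {
    AdminFacade dfs = new AdminFacade(new LocalRawStore());
    System.out.println("capacity=" + dfs.getRawCapacity() + " used=" + dfs.getRawUsed());
  }
}

Keeping the administrative calls on the wrapper lets callers work against a single public type while the checksumming layer stays in between.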

+ 114 - 114
src/java/org/apache/hadoop/dfs/FSConstants.java

@@ -25,120 +25,120 @@ import org.apache.hadoop.conf.Configuration;
  * @author Mike Cafarella
  ************************************/
 public interface FSConstants {
-    public static int MIN_BLOCKS_FOR_WRITE = 5;
-
-    public static final long WRITE_COMPLETE = 0xcafae11a;
-
-    //
-    // IPC Opcodes 
-    //
-    // Processed at namenode
-    public static final byte OP_ERROR = (byte) 0;
-    public static final byte OP_HEARTBEAT = (byte) 1;
-    public static final byte OP_BLOCKRECEIVED = (byte) 2;
-    public static final byte OP_BLOCKREPORT = (byte) 3;
-    public static final byte OP_TRANSFERDATA = (byte) 4;
-
-    // Processed at namenode, from client
-    public static final byte OP_CLIENT_OPEN = (byte) 20;
-    public static final byte OP_CLIENT_STARTFILE = (byte) 21;
-    public static final byte OP_CLIENT_ADDBLOCK = (byte) 22;
-    public static final byte OP_CLIENT_RENAMETO = (byte) 23;
-    public static final byte OP_CLIENT_DELETE = (byte) 24;  
-    public static final byte OP_CLIENT_COMPLETEFILE = (byte) 25;
-    public static final byte OP_CLIENT_LISTING = (byte) 26;
-    public static final byte OP_CLIENT_OBTAINLOCK = (byte) 27;
-    public static final byte OP_CLIENT_RELEASELOCK = (byte) 28;
-    public static final byte OP_CLIENT_EXISTS = (byte) 29;
-    public static final byte OP_CLIENT_ISDIR = (byte) 30;
-    public static final byte OP_CLIENT_MKDIRS = (byte) 31;
-    public static final byte OP_CLIENT_RENEW_LEASE = (byte) 32;
-    public static final byte OP_CLIENT_ABANDONBLOCK = (byte) 33;
-    public static final byte OP_CLIENT_RAWSTATS = (byte) 34;
-    public static final byte OP_CLIENT_DATANODEREPORT = (byte) 35;
-    public static final byte OP_CLIENT_DATANODE_HINTS = (byte) 36;
+  public static int MIN_BLOCKS_FOR_WRITE = 5;
+
+  public static final long WRITE_COMPLETE = 0xcafae11a;
+
+  //
+  // IPC Opcodes 
+  //
+  // Processed at namenode
+  public static final byte OP_ERROR = (byte) 0;
+  public static final byte OP_HEARTBEAT = (byte) 1;
+  public static final byte OP_BLOCKRECEIVED = (byte) 2;
+  public static final byte OP_BLOCKREPORT = (byte) 3;
+  public static final byte OP_TRANSFERDATA = (byte) 4;
+
+  // Processed at namenode, from client
+  public static final byte OP_CLIENT_OPEN = (byte) 20;
+  public static final byte OP_CLIENT_STARTFILE = (byte) 21;
+  public static final byte OP_CLIENT_ADDBLOCK = (byte) 22;
+  public static final byte OP_CLIENT_RENAMETO = (byte) 23;
+  public static final byte OP_CLIENT_DELETE = (byte) 24;  
+  public static final byte OP_CLIENT_COMPLETEFILE = (byte) 25;
+  public static final byte OP_CLIENT_LISTING = (byte) 26;
+  public static final byte OP_CLIENT_OBTAINLOCK = (byte) 27;
+  public static final byte OP_CLIENT_RELEASELOCK = (byte) 28;
+  public static final byte OP_CLIENT_EXISTS = (byte) 29;
+  public static final byte OP_CLIENT_ISDIR = (byte) 30;
+  public static final byte OP_CLIENT_MKDIRS = (byte) 31;
+  public static final byte OP_CLIENT_RENEW_LEASE = (byte) 32;
+  public static final byte OP_CLIENT_ABANDONBLOCK = (byte) 33;
+  public static final byte OP_CLIENT_RAWSTATS = (byte) 34;
+  public static final byte OP_CLIENT_DATANODEREPORT = (byte) 35;
+  public static final byte OP_CLIENT_DATANODE_HINTS = (byte) 36;
     
-    // Processed at datanode, back from namenode
-    public static final byte OP_ACK = (byte) 40;
-    public static final byte OP_TRANSFERBLOCKS = (byte) 41;    
-    public static final byte OP_INVALIDATE_BLOCKS = (byte) 42;
-    public static final byte OP_FAILURE = (byte) 43;
-
-    // Processed at client, back from namenode
-    public static final byte OP_CLIENT_OPEN_ACK = (byte) 60;
-    public static final byte OP_CLIENT_STARTFILE_ACK = (byte) 61;
-    public static final byte OP_CLIENT_ADDBLOCK_ACK = (byte) 62;
-    public static final byte OP_CLIENT_RENAMETO_ACK = (byte) 63;
-    public static final byte OP_CLIENT_DELETE_ACK = (byte) 64;
-    public static final byte OP_CLIENT_COMPLETEFILE_ACK = (byte) 65;
-    public static final byte OP_CLIENT_TRYAGAIN = (byte) 66;
-    public static final byte OP_CLIENT_LISTING_ACK = (byte) 67;
-    public static final byte OP_CLIENT_OBTAINLOCK_ACK = (byte) 68;
-    public static final byte OP_CLIENT_RELEASELOCK_ACK = (byte) 69;
-    public static final byte OP_CLIENT_EXISTS_ACK = (byte) 70;  
-    public static final byte OP_CLIENT_ISDIR_ACK = (byte) 71;
-    public static final byte OP_CLIENT_MKDIRS_ACK = (byte) 72;
-    public static final byte OP_CLIENT_RENEW_LEASE_ACK = (byte) 73;    
-    public static final byte OP_CLIENT_ABANDONBLOCK_ACK = (byte) 74;
-    public static final byte OP_CLIENT_RAWSTATS_ACK = (byte) 75;
-    public static final byte OP_CLIENT_DATANODEREPORT_ACK = (byte) 76;
-    public static final byte OP_CLIENT_DATANODE_HINTS_ACK = (byte) 77;
-
-    // Processed at datanode stream-handler
-    public static final byte OP_WRITE_BLOCK = (byte) 80;
-    public static final byte OP_READ_BLOCK = (byte) 81;
-    public static final byte OP_READSKIP_BLOCK = (byte) 82;
-    public static final byte OP_READ_RANGE_BLOCK = (byte) 83;
-
-    // Encoding types
-    public static final byte RUNLENGTH_ENCODING = 0;
-    public static final byte CHUNKED_ENCODING = 1;
-
-    // Return codes for file create
-    public static final int OPERATION_FAILED = 0;
-    public static final int STILL_WAITING = 1;
-    public static final int COMPLETE_SUCCESS = 2;
-
-    // Chunk the block Invalidate message
-    public static final int BLOCK_INVALIDATE_CHUNK = 100;
-
-    //
-    // Timeouts, constants
-    //
-    public static long HEARTBEAT_INTERVAL = 3;
-    public static long BLOCKREPORT_INTERVAL = 60 * 60 * 1000;
-    public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
-    public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
-    public static int READ_TIMEOUT = 60 * 1000;
-
-    // We need to limit the length and depth of a path in the filesystem.  HADOOP-438
-    // Currently we set the maximum length to 8k characters and the maximum depth to 1k.  
-    public static int MAX_PATH_LENGTH = 8000;
-    public static int MAX_PATH_DEPTH = 1000;
+  // Processed at datanode, back from namenode
+  public static final byte OP_ACK = (byte) 40;
+  public static final byte OP_TRANSFERBLOCKS = (byte) 41;    
+  public static final byte OP_INVALIDATE_BLOCKS = (byte) 42;
+  public static final byte OP_FAILURE = (byte) 43;
+
+  // Processed at client, back from namenode
+  public static final byte OP_CLIENT_OPEN_ACK = (byte) 60;
+  public static final byte OP_CLIENT_STARTFILE_ACK = (byte) 61;
+  public static final byte OP_CLIENT_ADDBLOCK_ACK = (byte) 62;
+  public static final byte OP_CLIENT_RENAMETO_ACK = (byte) 63;
+  public static final byte OP_CLIENT_DELETE_ACK = (byte) 64;
+  public static final byte OP_CLIENT_COMPLETEFILE_ACK = (byte) 65;
+  public static final byte OP_CLIENT_TRYAGAIN = (byte) 66;
+  public static final byte OP_CLIENT_LISTING_ACK = (byte) 67;
+  public static final byte OP_CLIENT_OBTAINLOCK_ACK = (byte) 68;
+  public static final byte OP_CLIENT_RELEASELOCK_ACK = (byte) 69;
+  public static final byte OP_CLIENT_EXISTS_ACK = (byte) 70;  
+  public static final byte OP_CLIENT_ISDIR_ACK = (byte) 71;
+  public static final byte OP_CLIENT_MKDIRS_ACK = (byte) 72;
+  public static final byte OP_CLIENT_RENEW_LEASE_ACK = (byte) 73;    
+  public static final byte OP_CLIENT_ABANDONBLOCK_ACK = (byte) 74;
+  public static final byte OP_CLIENT_RAWSTATS_ACK = (byte) 75;
+  public static final byte OP_CLIENT_DATANODEREPORT_ACK = (byte) 76;
+  public static final byte OP_CLIENT_DATANODE_HINTS_ACK = (byte) 77;
+
+  // Processed at datanode stream-handler
+  public static final byte OP_WRITE_BLOCK = (byte) 80;
+  public static final byte OP_READ_BLOCK = (byte) 81;
+  public static final byte OP_READSKIP_BLOCK = (byte) 82;
+  public static final byte OP_READ_RANGE_BLOCK = (byte) 83;
+
+  // Encoding types
+  public static final byte RUNLENGTH_ENCODING = 0;
+  public static final byte CHUNKED_ENCODING = 1;
+
+  // Return codes for file create
+  public static final int OPERATION_FAILED = 0;
+  public static final int STILL_WAITING = 1;
+  public static final int COMPLETE_SUCCESS = 2;
+
+  // Chunk the block Invalidate message
+  public static final int BLOCK_INVALIDATE_CHUNK = 100;
+
+  //
+  // Timeouts, constants
+  //
+  public static long HEARTBEAT_INTERVAL = 3;
+  public static long BLOCKREPORT_INTERVAL = 60 * 60 * 1000;
+  public static final long LEASE_SOFTLIMIT_PERIOD = 60 * 1000;
+  public static final long LEASE_HARDLIMIT_PERIOD = 60 * LEASE_SOFTLIMIT_PERIOD;
+  public static int READ_TIMEOUT = 60 * 1000;
+
+  // We need to limit the length and depth of a path in the filesystem.  HADOOP-438
+  // Currently we set the maximum length to 8k characters and the maximum depth to 1k.  
+  public static int MAX_PATH_LENGTH = 8000;
+  public static int MAX_PATH_DEPTH = 1000;
     
-    //TODO mb@media-style.com: should be conf injected?
-    public static final int BUFFER_SIZE = new Configuration().getInt("io.file.buffer.size", 4096);
-
-    // SafeMode actions
-    public enum SafeModeAction{ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET; }
-
-    // Startup options
-    public enum StartupOption{ FORMAT, REGULAR, UPGRADE, ROLLBACK; }
-
-    /**
-     * Type of the node
-     */
-    static public enum NodeType {
-      NAME_NODE,
-      DATA_NODE;
-    }
-
-    // Version is reflected in the dfs image and edit log files.
-    // Version is reflected in the data storage file.
-    // Versions are negative.
-    // Decrement LAYOUT_VERSION to define a new version.
-    public static final int LAYOUT_VERSION = -4;
-    // Current version: 
-    // Top level directory is reorganized to allow file system state 
-    // transitions: upgrade, rollback, and finalize.
+  //TODO mb@media-style.com: should be conf injected?
+  public static final int BUFFER_SIZE = new Configuration().getInt("io.file.buffer.size", 4096);
+
+  // SafeMode actions
+  public enum SafeModeAction{ SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET; }
+
+  // Startup options
+  public enum StartupOption{ FORMAT, REGULAR, UPGRADE, ROLLBACK; }
+
+  /**
+   * Type of the node
+   */
+  static public enum NodeType {
+    NAME_NODE,
+    DATA_NODE;
+  }
+
+  // Version is reflected in the dfs image and edit log files.
+  // Version is reflected in the data storage file.
+  // Versions are negative.
+  // Decrement LAYOUT_VERSION to define a new version.
+  public static final int LAYOUT_VERSION = -4;
+  // Current version: 
+  // Top level directory is reorganized to allow file system state 
+  // transitions: upgrade, rollback, and finalize.
 }
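
The interface above packs the client/datanode/namenode protocol into one-byte opcodes plus a few timeouts, limits and enums. As a rough, standalone sketch (not Hadoop's actual wire code) of how such single-byte opcodes are typically framed: the opcode is written first and the receiver dispatches on it. Only the OP_HEARTBEAT value is taken from the constants above; everything else here is made up.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;

class OpcodeDemo {
  // Value copied from OP_HEARTBEAT above; the framing below is illustrative only.
  static final byte OP_HEARTBEAT = (byte) 1;

  public static void main(String[] args) throws IOException {
    // Sender: tag the message with its one-byte opcode, then the payload.
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeByte(OP_HEARTBEAT);
    out.writeLong(System.currentTimeMillis());

    // Receiver: read the opcode first and dispatch on it.
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(buf.toByteArray()));
    byte op = in.readByte();
    if (op == OP_HEARTBEAT) {
      System.out.println("heartbeat at " + in.readLong());
    }
  }
}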

+ 505 - 505
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -36,609 +36,609 @@ class FSDataset implements FSConstants {
 
 
   /**
-     * A node type that can be built into a tree reflecting the
-     * hierarchy of blocks on the local disk.
+   * A node type that can be built into a tree reflecting the
+   * hierarchy of blocks on the local disk.
+   */
+  class FSDir {
+    File dir;
+    int numBlocks = 0;
+    FSDir children[];
+    int lastChildIdx = 0;
+    /**
      */
-    class FSDir {
-        File dir;
-        int numBlocks = 0;
-        FSDir children[];
-        int lastChildIdx = 0;
-        /**
-         */
-        public FSDir(File dir) 
-            throws IOException {
-            this.dir = dir;
-            this.children = null;
-            if (! dir.exists()) {
-              if (! dir.mkdirs()) {
-                throw new IOException("Mkdirs failed to create " + 
-                                      dir.toString());
-              }
-            } else {
-              File[] files = dir.listFiles();
-              int numChildren = 0;
-              for (int idx = 0; idx < files.length; idx++) {
-                if (files[idx].isDirectory()) {
-                  numChildren++;
-                } else if (Block.isBlockFilename(files[idx])) {
-                  numBlocks++;
-                }
-              }
-              if (numChildren > 0) {
-                children = new FSDir[numChildren];
-                int curdir = 0;
-                for (int idx = 0; idx < files.length; idx++) {
-                  if (files[idx].isDirectory()) {
-                    children[curdir] = new FSDir(files[idx]);
-                    curdir++;
-                  }
-                }
-              }
+    public FSDir(File dir) 
+      throws IOException {
+      this.dir = dir;
+      this.children = null;
+      if (! dir.exists()) {
+        if (! dir.mkdirs()) {
+          throw new IOException("Mkdirs failed to create " + 
+                                dir.toString());
+        }
+      } else {
+        File[] files = dir.listFiles();
+        int numChildren = 0;
+        for (int idx = 0; idx < files.length; idx++) {
+          if (files[idx].isDirectory()) {
+            numChildren++;
+          } else if (Block.isBlockFilename(files[idx])) {
+            numBlocks++;
+          }
+        }
+        if (numChildren > 0) {
+          children = new FSDir[numChildren];
+          int curdir = 0;
+          for (int idx = 0; idx < files.length; idx++) {
+            if (files[idx].isDirectory()) {
+              children[curdir] = new FSDir(files[idx]);
+              curdir++;
             }
+          }
         }
+      }
+    }
         
-        public File addBlock( Block b, File src ) throws IOException {
-          //First try without creating subdirectories
-          File file = addBlock( b, src, false, false );          
-          return ( file != null ) ? file : addBlock( b, src, true, true );
-        }
+    public File addBlock( Block b, File src ) throws IOException {
+      //First try without creating subdirectories
+      File file = addBlock( b, src, false, false );          
+      return ( file != null ) ? file : addBlock( b, src, true, true );
+    }
 
-        private File addBlock( Block b, File src, boolean createOk, 
-                               boolean resetIdx ) throws IOException {
-            if (numBlocks < maxBlocksPerDir) {
-              File dest = new File(dir, b.getBlockName());
-              src.renameTo(dest);
-              numBlocks += 1;
-              return dest;
-            }
-            
-            if ( lastChildIdx < 0 && resetIdx ) {
-              //reset so that all children will be checked
-              lastChildIdx = random.nextInt( children.length );              
-            }
+    private File addBlock( Block b, File src, boolean createOk, 
+                           boolean resetIdx ) throws IOException {
+      if (numBlocks < maxBlocksPerDir) {
+        File dest = new File(dir, b.getBlockName());
+        src.renameTo(dest);
+        numBlocks += 1;
+        return dest;
+      }
             
-            if ( lastChildIdx >= 0 && children != null ) {
-              //Check if any child-tree has room for a block.
-              for (int i=0; i < children.length; i++) {
-                int idx = ( lastChildIdx + i )%children.length;
-                File file = children[idx].addBlock( b, src, false, resetIdx );
-                if ( file != null ) {
-                  lastChildIdx = idx;
-                  return file; 
-                }
-              }
-              lastChildIdx = -1;
-            }
+      if ( lastChildIdx < 0 && resetIdx ) {
+        //reset so that all children will be checked
+        lastChildIdx = random.nextInt( children.length );              
+      }
             
-            if ( !createOk ) {
-              return null;
-            }
+      if ( lastChildIdx >= 0 && children != null ) {
+        //Check if any child-tree has room for a block.
+        for (int i=0; i < children.length; i++) {
+          int idx = ( lastChildIdx + i )%children.length;
+          File file = children[idx].addBlock( b, src, false, resetIdx );
+          if ( file != null ) {
+            lastChildIdx = idx;
+            return file; 
+          }
+        }
+        lastChildIdx = -1;
+      }
             
-            if ( children == null || children.length == 0 ) {
-              children = new FSDir[maxBlocksPerDir];
-              for (int idx = 0; idx < maxBlocksPerDir; idx++) {
-                children[idx] = new FSDir( new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx) );
-              }
-            }
+      if ( !createOk ) {
+        return null;
+      }
             
-            //now pick a child randomly for creating a new set of subdirs.
-            lastChildIdx = random.nextInt( children.length );
-            return children[ lastChildIdx ].addBlock( b, src, true, false ); 
+      if ( children == null || children.length == 0 ) {
+        children = new FSDir[maxBlocksPerDir];
+        for (int idx = 0; idx < maxBlocksPerDir; idx++) {
+          children[idx] = new FSDir( new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx) );
         }
+      }
+            
+      //now pick a child randomly for creating a new set of subdirs.
+      lastChildIdx = random.nextInt( children.length );
+      return children[ lastChildIdx ].addBlock( b, src, true, false ); 
+    }
 
-        /**
-         * Populate the given blockSet with any child blocks
-         * found at this node.
-         */
-        public void getBlockInfo(TreeSet<Block> blockSet) {
-            if (children != null) {
-                for (int i = 0; i < children.length; i++) {
-                    children[i].getBlockInfo(blockSet);
-                }
-            }
+    /**
+     * Populate the given blockSet with any child blocks
+     * found at this node.
+     */
+    public void getBlockInfo(TreeSet<Block> blockSet) {
+      if (children != null) {
+        for (int i = 0; i < children.length; i++) {
+          children[i].getBlockInfo(blockSet);
+        }
+      }
 
-            File blockFiles[] = dir.listFiles();
-            for (int i = 0; i < blockFiles.length; i++) {
-                if (Block.isBlockFilename(blockFiles[i])) {
-                    blockSet.add(new Block(blockFiles[i], blockFiles[i].length()));
-                }
-            }
+      File blockFiles[] = dir.listFiles();
+      for (int i = 0; i < blockFiles.length; i++) {
+        if (Block.isBlockFilename(blockFiles[i])) {
+          blockSet.add(new Block(blockFiles[i], blockFiles[i].length()));
         }
+      }
+    }
 
 
-        void getVolumeMap(HashMap<Block, FSVolume> volumeMap, FSVolume volume) {
-          if (children != null) {
-                for (int i = 0; i < children.length; i++) {
-                    children[i].getVolumeMap(volumeMap, volume);
-                }
-            }
+    void getVolumeMap(HashMap<Block, FSVolume> volumeMap, FSVolume volume) {
+      if (children != null) {
+        for (int i = 0; i < children.length; i++) {
+          children[i].getVolumeMap(volumeMap, volume);
+        }
+      }
 
-            File blockFiles[] = dir.listFiles();
-            for (int i = 0; i < blockFiles.length; i++) {
-                if (Block.isBlockFilename(blockFiles[i])) {
-                    volumeMap.put(new Block(blockFiles[i], blockFiles[i].length()), volume);
-                }
-            }
+      File blockFiles[] = dir.listFiles();
+      for (int i = 0; i < blockFiles.length; i++) {
+        if (Block.isBlockFilename(blockFiles[i])) {
+          volumeMap.put(new Block(blockFiles[i], blockFiles[i].length()), volume);
         }
+      }
+    }
         
-        void getBlockMap(HashMap<Block, File> blockMap) {
-          if (children != null) {
-                for (int i = 0; i < children.length; i++) {
-                    children[i].getBlockMap(blockMap);
-                }
-            }
+    void getBlockMap(HashMap<Block, File> blockMap) {
+      if (children != null) {
+        for (int i = 0; i < children.length; i++) {
+          children[i].getBlockMap(blockMap);
+        }
+      }
 
-            File blockFiles[] = dir.listFiles();
-            for (int i = 0; i < blockFiles.length; i++) {
-                if (Block.isBlockFilename(blockFiles[i])) {
-                    blockMap.put(new Block(blockFiles[i], blockFiles[i].length()), blockFiles[i]);
-                }
-            }
+      File blockFiles[] = dir.listFiles();
+      for (int i = 0; i < blockFiles.length; i++) {
+        if (Block.isBlockFilename(blockFiles[i])) {
+          blockMap.put(new Block(blockFiles[i], blockFiles[i].length()), blockFiles[i]);
         }
-        /**
-         * check if a data directory is healthy
-         * @throws DiskErrorException
-         * @author hairong
-         */
-        public void checkDirTree() throws DiskErrorException {
-            DiskChecker.checkDir(dir);
+      }
+    }
+    /**
+     * check if a data directory is healthy
+     * @throws DiskErrorException
+     * @author hairong
+     */
+    public void checkDirTree() throws DiskErrorException {
+      DiskChecker.checkDir(dir);
             
-            if (children != null) {
-                for (int i = 0; i < children.length; i++) {
-                    children[i].checkDirTree();
-                }
-            }
+      if (children != null) {
+        for (int i = 0; i < children.length; i++) {
+          children[i].checkDirTree();
         }
+      }
+    }
         
-        void clearPath(File f) {
-          String root = dir.getAbsolutePath();
-          String dir = f.getAbsolutePath();
-          if ( dir.startsWith( root ) ) {
-            String[] dirNames = dir.substring( root.length() ).
-                         split( File.separator + "subdir" );
-            if ( clearPath( f, dirNames, 1 ) )
-              return;
-          }
-          clearPath( f, null, -1 );
-        }
+    void clearPath(File f) {
+      String root = dir.getAbsolutePath();
+      String dir = f.getAbsolutePath();
+      if ( dir.startsWith( root ) ) {
+        String[] dirNames = dir.substring( root.length() ).
+          split( File.separator + "subdir" );
+        if ( clearPath( f, dirNames, 1 ) )
+          return;
+      }
+      clearPath( f, null, -1 );
+    }
         
-        /*
-         * dirNames is an array of string integers derived from
-         * usual directory structure data/subdirN/subdirXY/subdirM ...
-         * If the dirNames array is non-null, we only check the child at 
-         * children[dirNames[idx]]. This avoids iterating over the
-         * children in the common case. If the directory structure changes 
-         * in later versions, we need to revisit this.
-         */
-        private boolean clearPath( File f, String[] dirNames, int idx ) {
-          if ( ( dirNames == null || idx == dirNames.length ) &&
-               dir.compareTo(f) == 0) {
-            numBlocks--;
-            return true;
-          }
+    /*
+     * dirNames is an array of string integers derived from
+     * usual directory structure data/subdirN/subdirXY/subdirM ...
+     * If dirName array is non-null, we only check the child at 
+     * the children[dirNames[idx]]. This avoids iterating over
+     * children in common case. If directory structure changes 
+     * in later versions, we need to revisit this.
+     */
+    private boolean clearPath( File f, String[] dirNames, int idx ) {
+      if ( ( dirNames == null || idx == dirNames.length ) &&
+           dir.compareTo(f) == 0) {
+        numBlocks--;
+        return true;
+      }
           
-          if ( dirNames != null ) {
-            //guess the child index from the directory name
-            if ( idx > ( dirNames.length - 1 ) || children == null ) {
-              return false;
-            }
-            int childIdx; 
-            try {
-              childIdx = Integer.parseInt( dirNames[idx] );
-            } catch ( NumberFormatException ignored ) {
-              // layout changed? we could print a warning.
-              return false;
-            }
-            return ( childIdx >= 0 && childIdx < children.length ) ?
-              children[childIdx].clearPath( f, dirNames, idx+1 ) : false;
-          }
+      if ( dirNames != null ) {
+        //guess the child index from the directory name
+        if ( idx > ( dirNames.length - 1 ) || children == null ) {
+          return false;
+        }
+        int childIdx; 
+        try {
+          childIdx = Integer.parseInt( dirNames[idx] );
+        } catch ( NumberFormatException ignored ) {
+          // layout changed? we could print a warning.
+          return false;
+        }
+        return ( childIdx >= 0 && childIdx < children.length ) ?
+          children[childIdx].clearPath( f, dirNames, idx+1 ) : false;
+      }
 
-          //guesses failed. back to blind iteration.
-          if ( children != null ) {
-            for(int i=0; i < children.length; i++) {
-              if ( children[i].clearPath( f, null, -1 ) ){
-                return true;
-              }
-            }
+      //guesses failed. back to blind iteration.
+      if ( children != null ) {
+        for(int i=0; i < children.length; i++) {
+          if ( children[i].clearPath( f, null, -1 ) ){
+            return true;
           }
-          return false;
         }
+      }
+      return false;
+    }
         
-        public String toString() {
-          return "FSDir{" +
-              "dir=" + dir +
-              ", children=" + (children == null ? null : Arrays.asList(children)) +
-              "}";
-        }
+    public String toString() {
+      return "FSDir{" +
+        "dir=" + dir +
+        ", children=" + (children == null ? null : Arrays.asList(children)) +
+        "}";
     }
+  }
 
-    class FSVolume {
-      static final double USABLE_DISK_PCT_DEFAULT = 0.98f; 
+  class FSVolume {
+    static final double USABLE_DISK_PCT_DEFAULT = 0.98f; 
 
-      private FSDir dataDir;
-      private File tmpDir;
-      private DF usage;
-      private long reserved;
-      private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
+    private FSDir dataDir;
+    private File tmpDir;
+    private DF usage;
+    private long reserved;
+    private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
     
-      FSVolume( File currentDir, Configuration conf) throws IOException {
-        this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
-        this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
-            (float) USABLE_DISK_PCT_DEFAULT);
-        File parent = currentDir.getParentFile();
-        this.dataDir = new FSDir( currentDir );
-        this.tmpDir = new File(parent, "tmp");
-        if (tmpDir.exists()) {
-          FileUtil.fullyDelete(tmpDir);
-        }
-        if (!tmpDir.mkdirs()) {
-          if (!tmpDir.isDirectory()) {
-            throw new IOException("Mkdirs failed to create " + tmpDir.toString());
-          }
+    FSVolume( File currentDir, Configuration conf) throws IOException {
+      this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
+      this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
+                                         (float) USABLE_DISK_PCT_DEFAULT);
+      File parent = currentDir.getParentFile();
+      this.dataDir = new FSDir( currentDir );
+      this.tmpDir = new File(parent, "tmp");
+      if (tmpDir.exists()) {
+        FileUtil.fullyDelete(tmpDir);
+      }
+      if (!tmpDir.mkdirs()) {
+        if (!tmpDir.isDirectory()) {
+          throw new IOException("Mkdirs failed to create " + tmpDir.toString());
         }
-        this.usage = new DF(parent, conf);
       }
+      this.usage = new DF(parent, conf);
+    }
       
-      long getCapacity() throws IOException {
-        return usage.getCapacity();
-      }
+    long getCapacity() throws IOException {
+      return usage.getCapacity();
+    }
       
-      long getAvailable() throws IOException {
-        long capacity = usage.getCapacity();
-        long freespace = Math.round(usage.getAvailableSkipRefresh() -
-                                    capacity * (1 - usableDiskPct) - reserved); 
-        return ( freespace > 0 ) ? freespace : 0;
-      }
+    long getAvailable() throws IOException {
+      long capacity = usage.getCapacity();
+      long freespace = Math.round(usage.getAvailableSkipRefresh() -
+                                  capacity * (1 - usableDiskPct) - reserved); 
+      return ( freespace > 0 ) ? freespace : 0;
+    }
       
-      String getMount() throws IOException {
-        return usage.getMount();
-      }
+    String getMount() throws IOException {
+      return usage.getMount();
+    }
       
-      File createTmpFile(Block b) throws IOException {
-        File f = new File(tmpDir, b.getBlockName());
-        try {
-          if (f.exists()) {
-            throw new IOException("Unexpected problem in creating temporary file for "+
-                b + ".  File " + f + " should not be present, but is.");
-          }
-          // Create the zero-length temp file
-          //
-          if (!f.createNewFile()) {
-            throw new IOException("Unexpected problem in creating temporary file for "+
-                b + ".  File " + f + " should be creatable, but is already present.");
-          }
-        } catch (IOException ie) {
-          System.out.println("Exception!  " + ie);
-          throw ie;
+    File createTmpFile(Block b) throws IOException {
+      File f = new File(tmpDir, b.getBlockName());
+      try {
+        if (f.exists()) {
+          throw new IOException("Unexpected problem in creating temporary file for "+
+                                b + ".  File " + f + " should not be present, but is.");
         }
-        return f;
+        // Create the zero-length temp file
+        //
+        if (!f.createNewFile()) {
+          throw new IOException("Unexpected problem in creating temporary file for "+
+                                b + ".  File " + f + " should be creatable, but is already present.");
+        }
+      } catch (IOException ie) {
+        System.out.println("Exception!  " + ie);
+        throw ie;
       }
+      return f;
+    }
       
-      File addBlock(Block b, File f) throws IOException {
-        return dataDir.addBlock(b, f);
-      }
+    File addBlock(Block b, File f) throws IOException {
+      return dataDir.addBlock(b, f);
+    }
       
-      void checkDirs() throws DiskErrorException {
-        dataDir.checkDirTree();
-        DiskChecker.checkDir(tmpDir);
-      }
+    void checkDirs() throws DiskErrorException {
+      dataDir.checkDirTree();
+      DiskChecker.checkDir(tmpDir);
+    }
       
-      void getBlockInfo(TreeSet<Block> blockSet) {
-        dataDir.getBlockInfo(blockSet);
-      }
+    void getBlockInfo(TreeSet<Block> blockSet) {
+      dataDir.getBlockInfo(blockSet);
+    }
       
-      void getVolumeMap(HashMap<Block, FSVolume> volumeMap) {
-        dataDir.getVolumeMap(volumeMap, this);
-      }
+    void getVolumeMap(HashMap<Block, FSVolume> volumeMap) {
+      dataDir.getVolumeMap(volumeMap, this);
+    }
       
-      void getBlockMap(HashMap<Block, File> blockMap) {
-        dataDir.getBlockMap(blockMap);
-      }
+    void getBlockMap(HashMap<Block, File> blockMap) {
+      dataDir.getBlockMap(blockMap);
+    }
       
-      void clearPath(File f) {
-        dataDir.clearPath(f);
-      }
+    void clearPath(File f) {
+      dataDir.clearPath(f);
+    }
       
-      public String toString() {
-        return dataDir.dir.getAbsolutePath();
-      }
+    public String toString() {
+      return dataDir.dir.getAbsolutePath();
     }
+  }
     
-    class FSVolumeSet {
-      FSVolume[] volumes = null;
-      int curVolume = 0;
+  class FSVolumeSet {
+    FSVolume[] volumes = null;
+    int curVolume = 0;
       
-      FSVolumeSet(FSVolume[] volumes) {
-        this.volumes = volumes;
-      }
+    FSVolumeSet(FSVolume[] volumes) {
+      this.volumes = volumes;
+    }
       
-      synchronized FSVolume getNextVolume(long blockSize) throws IOException {
-        int startVolume = curVolume;
-        while (true) {
-          FSVolume volume = volumes[curVolume];
-          curVolume = (curVolume + 1) % volumes.length;
-          if (volume.getAvailable() >= blockSize) { return volume; }
-          if (curVolume == startVolume) {
-            throw new DiskOutOfSpaceException("Insufficient space for an additional block");
-          }
+    synchronized FSVolume getNextVolume(long blockSize) throws IOException {
+      int startVolume = curVolume;
+      while (true) {
+        FSVolume volume = volumes[curVolume];
+        curVolume = (curVolume + 1) % volumes.length;
+        if (volume.getAvailable() >= blockSize) { return volume; }
+        if (curVolume == startVolume) {
+          throw new DiskOutOfSpaceException("Insufficient space for an additional block");
         }
       }
+    }
       
-      synchronized long getCapacity() throws IOException {
-        long capacity = 0L;
-        for (int idx = 0; idx < volumes.length; idx++) {
-            capacity += volumes[idx].getCapacity();
-        }
-        return capacity;
+    synchronized long getCapacity() throws IOException {
+      long capacity = 0L;
+      for (int idx = 0; idx < volumes.length; idx++) {
+        capacity += volumes[idx].getCapacity();
       }
+      return capacity;
+    }
       
-      synchronized long getRemaining() throws IOException {
-        long remaining = 0L;
-        for (int idx = 0; idx < volumes.length; idx++) {
-          remaining += volumes[idx].getAvailable();
-        }
-        return remaining;
+    synchronized long getRemaining() throws IOException {
+      long remaining = 0L;
+      for (int idx = 0; idx < volumes.length; idx++) {
+        remaining += volumes[idx].getAvailable();
       }
+      return remaining;
+    }
       
-      synchronized void getBlockInfo(TreeSet<Block> blockSet) {
-        for (int idx = 0; idx < volumes.length; idx++) {
-          volumes[idx].getBlockInfo(blockSet);
-        }
+    synchronized void getBlockInfo(TreeSet<Block> blockSet) {
+      for (int idx = 0; idx < volumes.length; idx++) {
+        volumes[idx].getBlockInfo(blockSet);
       }
+    }
       
-      synchronized void getVolumeMap(HashMap<Block, FSVolume> volumeMap) {
-        for (int idx = 0; idx < volumes.length; idx++) {
-          volumes[idx].getVolumeMap(volumeMap);
-        }
+    synchronized void getVolumeMap(HashMap<Block, FSVolume> volumeMap) {
+      for (int idx = 0; idx < volumes.length; idx++) {
+        volumes[idx].getVolumeMap(volumeMap);
       }
+    }
       
-      synchronized void getBlockMap(HashMap<Block, File> blockMap) {
-        for (int idx = 0; idx < volumes.length; idx++) {
-          volumes[idx].getBlockMap(blockMap);
-        }
+    synchronized void getBlockMap(HashMap<Block, File> blockMap) {
+      for (int idx = 0; idx < volumes.length; idx++) {
+        volumes[idx].getBlockMap(blockMap);
       }
+    }
       
-      synchronized void checkDirs() throws DiskErrorException {
-        for (int idx = 0; idx < volumes.length; idx++) {
-          volumes[idx].checkDirs();
-        }
+    synchronized void checkDirs() throws DiskErrorException {
+      for (int idx = 0; idx < volumes.length; idx++) {
+        volumes[idx].checkDirs();
       }
+    }
       
-      public String toString() {
-        StringBuffer sb = new StringBuffer();
-        for (int idx = 0; idx < volumes.length; idx++) {
-          sb.append(volumes[idx].toString());
-          if (idx != volumes.length - 1) { sb.append(","); }
-        }
-        return sb.toString();
+    public String toString() {
+      StringBuffer sb = new StringBuffer();
+      for (int idx = 0; idx < volumes.length; idx++) {
+        sb.append(volumes[idx].toString());
+        if (idx != volumes.length - 1) { sb.append(","); }
       }
+      return sb.toString();
     }
-    //////////////////////////////////////////////////////
-    //
-    // FSDataSet
-    //
-    //////////////////////////////////////////////////////
-
-    FSVolumeSet volumes;
-    private HashMap<Block,File> ongoingCreates = new HashMap<Block,File>();
-    private int maxBlocksPerDir = 0;
-    private HashMap<Block,FSVolume> volumeMap = null;
-    private HashMap<Block,File> blockMap = null;
-    static  Random random = new Random();
+  }
+  //////////////////////////////////////////////////////
+  //
+  // FSDataSet
+  //
+  //////////////////////////////////////////////////////
+
+  FSVolumeSet volumes;
+  private HashMap<Block,File> ongoingCreates = new HashMap<Block,File>();
+  private int maxBlocksPerDir = 0;
+  private HashMap<Block,FSVolume> volumeMap = null;
+  private HashMap<Block,File> blockMap = null;
+  static  Random random = new Random();
 
-    /**
-     * An FSDataset has a directory where it loads its data files.
-     */
-    public FSDataset( DataStorage storage, Configuration conf) throws IOException {
-    	this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64);
-        FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
-        for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
-          volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
-        }
-        volumes = new FSVolumeSet(volArray);
-        volumeMap = new HashMap<Block,FSVolume>();
-        volumes.getVolumeMap(volumeMap);
-        blockMap = new HashMap<Block,File>();
-        volumes.getBlockMap(blockMap);
+  /**
+   * An FSDataset has a directory where it loads its data files.
+   */
+  public FSDataset( DataStorage storage, Configuration conf) throws IOException {
+    this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64);
+    FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
+    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
+      volArray[idx] = new FSVolume(storage.getStorageDir(idx).getCurrentDir(), conf);
     }
+    volumes = new FSVolumeSet(volArray);
+    volumeMap = new HashMap<Block,FSVolume>();
+    volumes.getVolumeMap(volumeMap);
+    blockMap = new HashMap<Block,File>();
+    volumes.getBlockMap(blockMap);
+  }
 
-    /**
-     * Return total capacity, used and unused
-     */
-    public long getCapacity() throws IOException {
-        return volumes.getCapacity();
-    }
+  /**
+   * Return total capacity, used and unused
+   */
+  public long getCapacity() throws IOException {
+    return volumes.getCapacity();
+  }
 
-    /**
-     * Return how many bytes can still be stored in the FSDataset
-     */
-    public long getRemaining() throws IOException {
-        return volumes.getRemaining();
-    }
+  /**
+   * Return how many bytes can still be stored in the FSDataset
+   */
+  public long getRemaining() throws IOException {
+    return volumes.getRemaining();
+  }
 
-    /**
-     * Find the block's on-disk length
-     */
-    public long getLength(Block b) throws IOException {
-        if (! isValidBlock(b)) {
-            throw new IOException("Block " + b + " is not valid.");
-        }
-        File f = getFile(b);
-        return f.length();
+  /**
+   * Find the block's on-disk length
+   */
+  public long getLength(Block b) throws IOException {
+    if (! isValidBlock(b)) {
+      throw new IOException("Block " + b + " is not valid.");
     }
+    File f = getFile(b);
+    return f.length();
+  }
 
-    /**
-     * Get a stream of data from the indicated block.
-     */
-    public synchronized InputStream getBlockData(Block b) throws IOException {
-        if (! isValidBlock(b)) {
-            throw new IOException("Block " + b + " is not valid.");
-        }
-        // File should be opened with the lock.
-        return new FileInputStream(getFile(b));
+  /**
+   * Get a stream of data from the indicated block.
+   */
+  public synchronized InputStream getBlockData(Block b) throws IOException {
+    if (! isValidBlock(b)) {
+      throw new IOException("Block " + b + " is not valid.");
     }
+    // File should be opened with the lock.
+    return new FileInputStream(getFile(b));
+  }
 
-    /**
-     * Start writing to a block file
-     */
-    public OutputStream writeToBlock(Block b) throws IOException {
-        //
-        // Make sure the block isn't a valid one - we're still creating it!
-        //
-        if (isValidBlock(b)) {
-            throw new IOException("Block " + b + " is valid, and cannot be written to.");
-        }
-        long blockSize = b.getNumBytes();
+  /**
+   * Start writing to a block file
+   */
+  public OutputStream writeToBlock(Block b) throws IOException {
+    //
+    // Make sure the block isn't a valid one - we're still creating it!
+    //
+    if (isValidBlock(b)) {
+      throw new IOException("Block " + b + " is valid, and cannot be written to.");
+    }
+    long blockSize = b.getNumBytes();
 
-        //
-        // Serialize access to /tmp, and check if file already there.
-        //
-        File f = null;
-        synchronized ( this ) {
-            //
-            // Is it already in the create process?
-            //
-            if (ongoingCreates.containsKey(b)) {
-              // check how old the temp file is - wait 1 hour
-              File tmp = (File)ongoingCreates.get(b);
-              if ((System.currentTimeMillis() - tmp.lastModified()) < 3600 * 1000) {
-                throw new IOException("Block " + b +
-                    " has already been started (though not completed), and thus cannot be created.");
-              } else {
-                // stale temp file - remove
-                if (!tmp.delete()) {
-                  throw new IOException("Can't write the block - unable to remove stale temp file " + tmp);
-                }
-                ongoingCreates.remove(b);
-              }
-            }
-            FSVolume v = null;
-            synchronized ( volumes ) {
-              v = volumes.getNextVolume(blockSize);
-              // create temporary file to hold block in the designated volume
-              f = v.createTmpFile(b);
-            }
-            ongoingCreates.put(b, f);
-            volumeMap.put(b, v);
+    //
+    // Serialize access to /tmp, and check if file already there.
+    //
+    File f = null;
+    synchronized ( this ) {
+      //
+      // Is it already in the create process?
+      //
+      if (ongoingCreates.containsKey(b)) {
+        // check how old the temp file is - wait 1 hour
+        File tmp = (File)ongoingCreates.get(b);
+        if ((System.currentTimeMillis() - tmp.lastModified()) < 3600 * 1000) {
+          throw new IOException("Block " + b +
+                                " has already been started (though not completed), and thus cannot be created.");
+        } else {
+          // stale temp file - remove
+          if (!tmp.delete()) {
+            throw new IOException("Can't write the block - unable to remove stale temp file " + tmp);
+          }
+          ongoingCreates.remove(b);
         }
-
-        //
-        // Finally, allow a writer to the block file
-        // REMIND - mjc - make this a filter stream that enforces a max
-        // block size, so clients can't go crazy
-        //
-        return new FileOutputStream(f);
+      }
+      FSVolume v = null;
+      synchronized ( volumes ) {
+        v = volumes.getNextVolume(blockSize);
+        // create temporary file to hold block in the designated volume
+        f = v.createTmpFile(b);
+      }
+      ongoingCreates.put(b, f);
+      volumeMap.put(b, v);
     }
 
     //
-    // REMIND - mjc - eventually we should have a timeout system
-    // in place to clean up block files left by abandoned clients.
-    // We should have some timer in place, so that if a blockfile
-    // is created but non-valid, and has been idle for >48 hours,
-    // we can GC it safely.
+    // Finally, allow a writer to the block file
+    // REMIND - mjc - make this a filter stream that enforces a max
+    // block size, so clients can't go crazy
     //
+    return new FileOutputStream(f);
+  }
 
-    /**
-     * Complete the block write!
-     */
-    public synchronized void finalizeBlock(Block b) throws IOException {
-        File f = ongoingCreates.get(b);
-        if (f == null || ! f.exists()) {
-          throw new IOException("No temporary file " + f + " for block " + b);
-        }
-        long finalLen = f.length();
-        b.setNumBytes(finalLen);
-        FSVolume v = volumeMap.get(b);
+  //
+  // REMIND - mjc - eventually we should have a timeout system
+  // in place to clean up block files left by abandoned clients.
+  // We should have some timer in place, so that if a blockfile
+  // is created but non-valid, and has been idle for >48 hours,
+  // we can GC it safely.
+  //
+
+  /**
+   * Complete the block write!
+   */
+  public synchronized void finalizeBlock(Block b) throws IOException {
+    File f = ongoingCreates.get(b);
+    if (f == null || ! f.exists()) {
+      throw new IOException("No temporary file " + f + " for block " + b);
+    }
+    long finalLen = f.length();
+    b.setNumBytes(finalLen);
+    FSVolume v = volumeMap.get(b);
         
-        File dest = null;
-        synchronized ( volumes ) {
-          dest = v.addBlock(b, f);
-        }
-        blockMap.put(b, dest);
-        ongoingCreates.remove(b);
+    File dest = null;
+    synchronized ( volumes ) {
+      dest = v.addBlock(b, f);
     }
+    blockMap.put(b, dest);
+    ongoingCreates.remove(b);
+  }
 
-    /**
-     * Return a table of block data
-     */
-    public Block[] getBlockReport() {
-        TreeSet<Block> blockSet = new TreeSet<Block>();
-        volumes.getBlockInfo(blockSet);
-        Block blockTable[] = new Block[blockSet.size()];
-        int i = 0;
-        for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
-            blockTable[i] = it.next();
-        }
-        return blockTable;
+  /**
+   * Return a table of block data
+   */
+  public Block[] getBlockReport() {
+    TreeSet<Block> blockSet = new TreeSet<Block>();
+    volumes.getBlockInfo(blockSet);
+    Block blockTable[] = new Block[blockSet.size()];
+    int i = 0;
+    for (Iterator<Block> it = blockSet.iterator(); it.hasNext(); i++) {
+      blockTable[i] = it.next();
     }
+    return blockTable;
+  }
 
-    /**
-     * Check whether the given block is a valid one.
-     */
-    public boolean isValidBlock(Block b) {
-        File f = getFile(b);
-        return (f!= null && f.exists());
-    }
+  /**
+   * Check whether the given block is a valid one.
+   */
+  public boolean isValidBlock(Block b) {
+    File f = getFile(b);
+    return (f!= null && f.exists());
+  }
 
-    /**
-     * We're informed that a block is no longer valid.  We
-     * could lazily garbage-collect the block, but why bother?
-     * Just get rid of it.
-     */
-    public void invalidate(Block invalidBlks[]) throws IOException {
-      for (int i = 0; i < invalidBlks.length; i++) {
-        File f;
-        synchronized (this) {
-          f = getFile(invalidBlks[i]);
-          if (f == null) {
-            throw new IOException("Unexpected error trying to delete block "
-                                  + invalidBlks[i] + 
-                                  ". Block not found in blockMap.");
-          }
-          FSVolume v = volumeMap.get(invalidBlks[i]);
-          if (v == null) {
-            throw new IOException("Unexpected error trying to delete block "
-                                  + invalidBlks[i] + 
-                                  ". No volume for this block.");
-          }
-          File parent = f.getParentFile();
-          if (parent == null) {
-            throw new IOException("Unexpected error trying to delete block "
-                                  + invalidBlks[i] + 
-                                  ". Parent not found for file " + f + ".");
-          }
-          v.clearPath(parent);
-          blockMap.remove(invalidBlks[i]);
-          volumeMap.remove(invalidBlks[i]);
+  /**
+   * We're informed that a block is no longer valid.  We
+   * could lazily garbage-collect the block, but why bother?
+   * Just get rid of it.
+   */
+  public void invalidate(Block invalidBlks[]) throws IOException {
+    for (int i = 0; i < invalidBlks.length; i++) {
+      File f;
+      synchronized (this) {
+        f = getFile(invalidBlks[i]);
+        if (f == null) {
+          throw new IOException("Unexpected error trying to delete block "
+                                + invalidBlks[i] + 
+                                ". Block not found in blockMap.");
         }
-        if (!f.delete()) {
-            throw new IOException("Unexpected error trying to delete block "
-                                  + invalidBlks[i] + " at file " + f);
+        FSVolume v = volumeMap.get(invalidBlks[i]);
+        if (v == null) {
+          throw new IOException("Unexpected error trying to delete block "
+                                + invalidBlks[i] + 
+                                ". No volume for this block.");
         }
-        DataNode.LOG.info("Deleting block " + invalidBlks[i]);
+        File parent = f.getParentFile();
+        if (parent == null) {
+          throw new IOException("Unexpected error trying to delete block "
+                                + invalidBlks[i] + 
+                                ". Parent not found for file " + f + ".");
+        }
+        v.clearPath(parent);
+        blockMap.remove(invalidBlks[i]);
+        volumeMap.remove(invalidBlks[i]);
+      }
+      if (!f.delete()) {
+        throw new IOException("Unexpected error trying to delete block "
+                              + invalidBlks[i] + " at file " + f);
       }
+      DataNode.LOG.info("Deleting block " + invalidBlks[i]);
     }
+  }
 
-    /**
-     * Turn the block identifier into a filename.
-     */
-    synchronized File getFile(Block b) {
-      return blockMap.get(b);
-    }
+  /**
+   * Turn the block identifier into a filename.
+   */
+  synchronized File getFile(Block b) {
+    return blockMap.get(b);
+  }
 
-    /**
-     * check if a data directory is healthy
-     * @throws DiskErrorException
-     * @author hairong
-     */
-    void checkDataDir() throws DiskErrorException {
-        volumes.checkDirs();
-    }
+  /**
+   * check if a data directory is healthy
+   * @throws DiskErrorException
+   * @author hairong
+   */
+  void checkDataDir() throws DiskErrorException {
+    volumes.checkDirs();
+  }
     
 
-    public String toString() {
-      return "FSDataset{dirpath='"+volumes+"'}";
-    }
+  public String toString() {
+    return "FSDataset{dirpath='"+volumes+"'}";
+  }
 
 }
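
In FSDataset above, FSVolumeSet.getNextVolume hands out volumes round-robin, remembering where the previous call stopped and failing only after a full pass finds no volume with room for the block. A small standalone sketch of that loop, assuming in-memory volumes with made-up names and sizes, follows.

import java.util.Arrays;
import java.util.List;

class VolumePickerDemo {
  // A stand-in for FSVolume: just a name and the bytes it can still hold.
  static final class Volume {
    final String name;
    final long available;
    Volume(String name, long available) { this.name = name; this.available = available; }
  }

  private final List<Volume> volumes;
  private int curVolume = 0;   // remembered across calls, like FSVolumeSet.curVolume

  VolumePickerDemo(List<Volume> volumes) { this.volumes = volumes; }

  // One full pass over the volumes, starting after the last one used;
  // give up only if no volume has room for the requested block size.
  synchronized Volume nextVolume(long blockSize) {
    int startVolume = curVolume;
    while (true) {
      Volume v = volumes.get(curVolume);
      curVolume = (curVolume + 1) % volumes.size();
      if (v.available >= blockSize) { return v; }
      if (curVolume == startVolume) {
        throw new IllegalStateException("Insufficient space for an additional block");
      }
    }
  }

  public static void main(String[] args) {
    VolumePickerDemo picker = new VolumePickerDemo(Arrays.asList(
        new Volume("disk0", 10L), new Volume("disk1", 500L)));
    System.out.println(picker.nextVolume(100L).name);   // prints disk1
  }
}

Persisting curVolume across calls spreads new blocks over all configured disks instead of filling the first one before the others are touched.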

+ 638 - 638
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -39,722 +39,722 @@ import org.apache.hadoop.metrics.MetricsContext;
  *************************************************/
 class FSDirectory implements FSConstants {
 
-    /******************************************************
-     * We keep an in-memory representation of the file/block
-     * hierarchy.
-     * 
-     * TODO: Factor out INode to a standalone class.
-     ******************************************************/
-    class INode {
-        private String name;
-        private INode parent;
-        private TreeMap<String, INode> children = null;
-        private Block blocks[];
-        private short blockReplication;
-
-        /**
-         */
-        INode(String name, Block blocks[], short replication) {
-            this.name = name;
-            this.parent = null;
-            this.blocks = blocks;
-            this.blockReplication = replication;
-        }
-
-        /**
-         */
-        INode(String name) {
-            this.name = name;
-            this.parent = null;
-            this.blocks = null;
-            this.blockReplication = 0;
-        }
-
-        /**
-         * Check whether it's a directory
-         */
-        synchronized public boolean isDir() {
-          return (blocks == null);
-        }
-        
-        /**
-         * Get block replication for the file 
-         * @return block replication
-         */
-        public short getReplication() {
-          return this.blockReplication;
-        }
-        
-        /**
-         * Get local file name
-         * @return local file name
-         */
-        String getLocalName() {
-          return name;
-        }
-
-        /**
-         * Get file blocks 
-         * @return file blocks
-         */
-        Block[] getBlocks() {
-          return this.blocks;
-        }
-        
-        /**
-         * Get parent directory 
-         * @return parent INode
-         */
-        INode getParent() {
-          return this.parent;
-        }
-
-        /**
-         * Get children iterator
-         * @return Iterator of children
-         */
-        Iterator<INode> getChildIterator() {
-          return ( children != null ) ?  children.values().iterator() : null;
-            // instead of null, we could return a static empty iterator.
-        }
-        
-        void addChild(String name, INode node) {
-          if ( children == null ) {
-            children = new TreeMap<String, INode>();
-          }
-          children.put(name, node);
-        }
-
-        /**
-         * This is the external interface
-         */
-        INode getNode(String target) {
-            if ( target == null || 
-                ! target.startsWith("/") || target.length() == 0) {
-                return null;
-            } else if (parent == null && "/".equals(target)) {
-                return this;
-            } else {
-                Vector components = new Vector();
-                int start = 0;
-                int slashid = 0;
-                while (start < target.length() && (slashid = target.indexOf('/', start)) >= 0) {
-                    components.add(target.substring(start, slashid));
-                    start = slashid + 1;
-                }
-                if (start < target.length()) {
-                    components.add(target.substring(start));
-                }
-                return getNode(components, 0);
-            }
-        }
-
-        /**
-         */
-        INode getNode(Vector components, int index) {
-            if (! name.equals((String) components.elementAt(index))) {
-                return null;
-            }
-            if (index == components.size()-1) {
-                return this;
-            }
-
-            // Check with children
-            INode child = this.getChild((String)components.elementAt(index+1));
-            if (child == null) {
-                return null;
-            } else {
-                return child.getNode(components, index+1);
-            }
-        }
-        
-        INode getChild( String name) {
-          return (children == null) ? null : children.get( name );
-        }
+  /******************************************************
+   * We keep an in-memory representation of the file/block
+   * hierarchy.
+   * 
+   * TODO: Factor out INode to a standalone class.
+   ******************************************************/
+  class INode {
+    private String name;
+    private INode parent;
+    private TreeMap<String, INode> children = null;
+    private Block blocks[];
+    private short blockReplication;
 
-        /**
-         * Add new INode to the file tree.
-         * Find the parent and insert 
-         * 
-         * @param path file path
-         * @param newNode INode to be added
-         * @return null if the node already exists; inserted INode, otherwise
-         * @throws FileNotFoundException 
-         * @author shv
-         */
-        INode addNode(String path, INode newNode) throws FileNotFoundException {
-          File target = new File( path );
-          // find parent
-          Path parent = new Path(path).getParent();
-          if (parent == null) { // add root
-              return null;
-          }
-          INode parentNode = getNode(parent.toString());
-          if (parentNode == null) {
-              throw new FileNotFoundException(
-                      "Parent path does not exist: "+path);
-          }
-          if (!parentNode.isDir()) {
-        	  throw new FileNotFoundException(
-        			  "Parent path is not a directory: "+path);
-          }
-           // check whether the parent already has a node with that name
-          String name = newNode.name = target.getName();
-          if( parentNode.getChild( name ) != null ) {
-            return null;
-          }
-          // insert into the parent children list
-          parentNode.addChild(name, newNode);
-          newNode.parent = parentNode;
-          return newNode;
-        }
-
-        /**
-         */
-        boolean removeNode() {
-            if (parent == null) {
-                return false;
-            } else {
-                parent.children.remove(name);
-                return true;
-            }
-        }
-          
-        /**
-         * Collect all the blocks at this INode and all its children.
-         * This operation is performed after a node is removed from the tree,
-         * and we want to GC all the blocks at this node and below.
-         */
-        void collectSubtreeBlocks(Vector v) {
-            if (blocks != null) {
-                for (int i = 0; i < blocks.length; i++) {
-                    v.add(blocks[i]);
-                }
-            }
-            incrDeletedFileCount();
-            for (Iterator<INode> it = getChildIterator(); it != null &&
-                                                          it.hasNext(); ) {
-                it.next().collectSubtreeBlocks(v);
-            }
-        }
-
-        /**
-         */
-        int numItemsInTree() {
-            int total = 0;
-            for (Iterator<INode> it = getChildIterator(); it != null && 
-                                                          it.hasNext(); ) {
-                total += it.next().numItemsInTree();
-            }
-            return total + 1;
-        }
-
-        /**
-         */
-        String computeName() {
-            if (parent != null) {
-                return parent.computeName() + "/" + name;
-            } else {
-                return name;
-            }
-        }
-
-        /**
-         */
-        long computeFileLength() {
-            long total = 0;
-            if (blocks != null) {
-                for (int i = 0; i < blocks.length; i++) {
-                    total += blocks[i].getNumBytes();
-                }
-            }
-            return total;
-        }
-
-        /**
-         */
-        long computeContentsLength() {
-            long total = computeFileLength();
-            for (Iterator<INode> it = getChildIterator(); it != null && 
-                                                          it.hasNext(); ) {
-                total += it.next().computeContentsLength();
-            }
-            return total;
-        }
-
-        /**
-         * Get the block size of the first block
-         * @return the number of bytes
-         */
-        public long getBlockSize() {
-          if (blocks == null || blocks.length == 0) {
-            return 0;
-          } else {
-            return blocks[0].getNumBytes();
-          }
-        }
-        
-        /**
-         */
-        void listContents(Vector v) {
-            if (parent != null && blocks != null) {
-                v.add(this);
-            }
-
-            for (Iterator<INode> it = getChildIterator(); it != null && 
-                                                          it.hasNext(); ) {
-                v.add(it.next());
-            }
-        }
-    }
-
-    FSNamesystem namesystem = null;
-    INode rootDir = new INode("");
-    TreeMap activeLocks = new TreeMap();
-    FSImage fsImage;  
-    boolean ready = false;
-    // Metrics record
-    private MetricsRecord directoryMetrics = null;
-    
-    /** Access an existing dfs name directory. */
-    public FSDirectory(FSNamesystem ns) throws IOException {
-      this.fsImage = new FSImage();
-      namesystem = ns;
-      initialize();
+    /**
+     */
+    INode(String name, Block blocks[], short replication) {
+      this.name = name;
+      this.parent = null;
+      this.blocks = blocks;
+      this.blockReplication = replication;
     }
 
-    public FSDirectory(FSImage fsImage, FSNamesystem ns) throws IOException {
-      this.fsImage = fsImage;
-      namesystem = ns;
-      initialize();
-    }
-    
-    private void initialize() {
-      MetricsContext metricsContext = MetricsUtil.getContext("dfs");
-      directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
+    /**
+     */
+    INode(String name) {
+      this.name = name;
+      this.parent = null;
+      this.blocks = null;
+      this.blockReplication = 0;
     }
 
-    void loadFSImage( Collection<File> dataDirs,
-                      StartupOption startOpt ) throws IOException {
-      // format before starting up if requested
-      if( startOpt == StartupOption.FORMAT ) {
-        fsImage.setStorageDirectories( dataDirs );
-        fsImage.format();
-        startOpt = StartupOption.REGULAR;
-      }
-      try {
-        fsImage.recoverTransitionRead( dataDirs, startOpt );
-      } catch( IOException e ) {
-        fsImage.close();
-        throw e;
-      }
-      synchronized (this) {
-        this.ready = true;
-        this.notifyAll();
-      }
+    /**
+     * Check whether it's a directory
+     */
+    synchronized public boolean isDir() {
+      return (blocks == null);
     }
-
-    private void incrDeletedFileCount() {
-        directoryMetrics.incrMetric("files_deleted", 1);
-        directoryMetrics.update();
+        
+    /**
+     * Get block replication for the file 
+     * @return block replication
+     */
+    public short getReplication() {
+      return this.blockReplication;
     }
-    
+        
     /**
-     * Shutdown the filestore
+     * Get local file name
+     * @return local file name
      */
-    public void close() throws IOException {
-        fsImage.close();
+    String getLocalName() {
+      return name;
     }
 
     /**
-     * Block until the object is ready to be used.
+     * Get file blocks 
+     * @return file blocks
      */
-    void waitForReady() {
-        if (! ready) {
-            synchronized (this) {
-                while (!ready) {
-                    try {
-                        this.wait(5000);
-                    } catch (InterruptedException ie) {
-                    }
-                }
-            }
-        }
+    Block[] getBlocks() {
+      return this.blocks;
     }
-
+        
     /**
-     * Add the given filename to the fs.
+     * Get parent directory 
+     * @return parent INode
      */
-    public boolean addFile(UTF8 path, Block[] blocks, short replication) {
-        waitForReady();
-
-        // Always do an implicit mkdirs for parent directory tree
-        String pathString = path.toString();
-        if( ! mkdirs(new Path(pathString).getParent().toString()) ) {
-           return false;
-        }
-        INode newNode = new INode( new File(pathString).getName(), blocks, replication);
-        if( ! unprotectedAddFile(path, newNode) ) {
-           NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
-                    +"failed to add "+path+" with "
-                    +blocks.length+" blocks to the file system" );
-           return false;
-        }
-        // add create file record to log
-        fsImage.getEditLog().logCreateFile( newNode );
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                +path+" with "+blocks.length+" blocks is added to the file system" );
-        return true;
+    INode getParent() {
+      return this.parent;
     }
-    
+
     /**
+     * Get children iterator
+     * @return Iterator of children
      */
-    boolean unprotectedAddFile(UTF8 path, INode newNode) {
-      synchronized (rootDir) {
-         try {
-            if( rootDir.addNode(path.toString(), newNode ) != null ) {
-                int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
-                // Add file->block mapping
-                for (int i = 0; i < nrBlocks; i++)
-                    namesystem.blocksMap.addINode(newNode.blocks[i], newNode);
-                return true;
-            } else {
-                return false;
-            }
-        } catch (FileNotFoundException e ) {
-            return false;
-        }
-      }
+    Iterator<INode> getChildIterator() {
+      return ( children != null ) ?  children.values().iterator() : null;
+      // instead of null, we could return a static empty iterator.
     }
-    
-    boolean unprotectedAddFile(UTF8 path, Block[] blocks, short replication ) {
-      return unprotectedAddFile( path,  
-                    new INode( path.toString(), blocks, replication ));
+        
+    void addChild(String name, INode node) {
+      if ( children == null ) {
+        children = new TreeMap<String, INode>();
+      }
+      children.put(name, node);
     }
 
     /**
-     * Change the filename
+     * This is the external interface
      */
-    public boolean renameTo(UTF8 src, UTF8 dst) {
-      NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
-          +src+" to "+dst );
-      waitForReady();
-      if( ! unprotectedRenameTo(src, dst) )
-        return false;
-      fsImage.getEditLog().logRename(src, dst);
-      return true;
+    INode getNode(String target) {
+      if ( target == null || 
+           ! target.startsWith("/") || target.length() == 0) {
+        return null;
+      } else if (parent == null && "/".equals(target)) {
+        return this;
+      } else {
+        Vector components = new Vector();
+        int start = 0;
+        int slashid = 0;
+        while (start < target.length() && (slashid = target.indexOf('/', start)) >= 0) {
+          components.add(target.substring(start, slashid));
+          start = slashid + 1;
+        }
+        if (start < target.length()) {
+          components.add(target.substring(start));
+        }
+        return getNode(components, 0);
+      }
     }
 
     /**
      */
-    boolean unprotectedRenameTo(UTF8 src, UTF8 dst) {
-        synchronized(rootDir) {
-          String srcStr = src.toString();
-          String dstStr = dst.toString();
-            INode renamedNode = rootDir.getNode(srcStr);
-            if (renamedNode == null) {
-                NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                        +"failed to rename "+src+" to "+dst+ " because source does not exist" );
-                return false;
-            }
-            if (isDir(dst)) {
-              dstStr += "/" + new File(srcStr).getName();
-            }
-            if( rootDir.getNode(dstStr.toString()) != null ) {
-                NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                        +"failed to rename "+src+" to "+dstStr+ " because destination exists" );
-                return false;
-            }
-            renamedNode.removeNode();
-            
-            // the renamed node can be reused now
-            try {
-                if( rootDir.addNode(dstStr, renamedNode ) != null ) {
-                    NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
-                        +src+" is renamed to "+dst );
-                    return true;
-                }
-            } catch (FileNotFoundException e ) {
-                NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                        +"failed to rename "+src+" to "+dst );
-                try {
-                    rootDir.addNode(srcStr, renamedNode); // put it back
-                }catch(FileNotFoundException e2) {                
-                }
-            }
+    INode getNode(Vector components, int index) {
+      if (! name.equals((String) components.elementAt(index))) {
+        return null;
+      }
+      if (index == components.size()-1) {
+        return this;
+      }
 
-            return false;
-        }
+      // Check with children
+      INode child = this.getChild((String)components.elementAt(index+1));
+      if (child == null) {
+        return null;
+      } else {
+        return child.getNode(components, index+1);
+      }
+    }
+        
+    INode getChild( String name) {
+      return (children == null) ? null : children.get( name );
     }
 
     /**
-     * Set file replication
+     * Add new INode to the file tree.
+     * Find the parent and insert 
      * 
-     * @param src file name
-     * @param replication new replication
-     * @param oldReplication old replication - output parameter
-     * @return array of file blocks
-     * @throws IOException
+     * @param path file path
+     * @param newNode INode to be added
+     * @return null if the node already exists; inserted INode, otherwise
+     * @throws FileNotFoundException 
+     * @author shv
      */
-    Block[] setReplication( String src, 
-                            short replication,
-                            Vector oldReplication
-                           ) throws IOException {
-      waitForReady();
-      Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication );
-      if( fileBlocks != null )  // log replication change
-        fsImage.getEditLog().logSetReplication( src, replication );
-      return fileBlocks;
-    }
-
-    Block[] unprotectedSetReplication(  String src, 
-                                        short replication,
-                                        Vector oldReplication
-                                      ) throws IOException {
-      if( oldReplication == null )
-        oldReplication = new Vector();
-      oldReplication.setSize(1);
-      oldReplication.set( 0, new Integer(-1) );
-      Block[] fileBlocks = null;
-      synchronized(rootDir) {
-        INode fileNode = rootDir.getNode(src);
-        if (fileNode == null)
-          return null;
-        if( fileNode.isDir() )
-          return null;
-        oldReplication.set( 0, new Integer( fileNode.blockReplication ));
-        fileNode.blockReplication = replication;
-        fileBlocks = fileNode.blocks;
+    INode addNode(String path, INode newNode) throws FileNotFoundException {
+      File target = new File( path );
+      // find parent
+      Path parent = new Path(path).getParent();
+      if (parent == null) { // add root
+        return null;
+      }
+      INode parentNode = getNode(parent.toString());
+      if (parentNode == null) {
+        throw new FileNotFoundException(
+                                        "Parent path does not exist: "+path);
+      }
+      if (!parentNode.isDir()) {
+        throw new FileNotFoundException(
+                                        "Parent path is not a directory: "+path);
+      }
+      // check whether the parent already has a node with that name
+      String name = newNode.name = target.getName();
+      if( parentNode.getChild( name ) != null ) {
+        return null;
       }
-      return fileBlocks;
+      // insert into the parent children list
+      parentNode.addChild(name, newNode);
+      newNode.parent = parentNode;
+      return newNode;
     }
 
     /**
-     * Get the blocksize of a file
-     * @param filename the filename
-     * @return the number of bytes in the first block
-     * @throws IOException if it is a directory or does not exist.
      */
-    public long getBlockSize(String filename) throws IOException {
-      synchronized (rootDir) {
-        INode fileNode = rootDir.getNode(filename);
-        if (fileNode == null) {
-          throw new IOException("Unknown file: " + filename);
-        }
-        if (fileNode.isDir()) {
-          throw new IOException("Getting block size of a directory: " + 
-                                filename);
-        }
-        return fileNode.getBlockSize();
+    boolean removeNode() {
+      if (parent == null) {
+        return false;
+      } else {
+        parent.children.remove(name);
+        return true;
       }
     }
-    
-    /**
-     * Remove the file from management, return blocks
-     */
-    public Block[] delete(UTF8 src) {
-        NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "
-                +src );
-        waitForReady();
-        Block[] blocks = unprotectedDelete(src); 
-        if( blocks != null )
-          fsImage.getEditLog().logDelete( src );
-        return blocks;
-    }
-
+          
     /**
+     * Collect all the blocks at this INode and all its children.
+     * This operation is performed after a node is removed from the tree,
+     * and we want to GC all the blocks at this node and below.
      */
-    Block[] unprotectedDelete(UTF8 src) {
-        synchronized (rootDir) {
-            INode targetNode = rootDir.getNode(src.toString());
-            if (targetNode == null) {
-                NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
-                        +"failed to remove "+src+" because it does not exist" );
-                return null;
-            } else {
-                //
-                // Remove the node from the namespace and GC all
-                // the blocks underneath the node.
-                //
-                if (! targetNode.removeNode()) {
-                    NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
-                            +"failed to remove "+src+" because it does not have a parent" );
-                    return null;
-                } else {
-                    NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-                            +src+" is removed" );
-                    Vector v = new Vector();
-                    targetNode.collectSubtreeBlocks(v);
-                    for (Iterator it = v.iterator(); it.hasNext(); ) {
-                        Block b = (Block) it.next();
-                        namesystem.blocksMap.removeINode(b);
-                    }
-                    return (Block[]) v.toArray(new Block[v.size()]);
-                }
-            }
+    void collectSubtreeBlocks(Vector v) {
+      if (blocks != null) {
+        for (int i = 0; i < blocks.length; i++) {
+          v.add(blocks[i]);
         }
+      }
+      incrDeletedFileCount();
+      for (Iterator<INode> it = getChildIterator(); it != null &&
+             it.hasNext(); ) {
+        it.next().collectSubtreeBlocks(v);
+      }
     }
 
     /**
      */
-    public int obtainLock(UTF8 src, UTF8 holder, boolean exclusive) {
-        TreeSet holders = (TreeSet) activeLocks.get(src);
-        if (holders == null) {
-            holders = new TreeSet();
-            activeLocks.put(src, holders);
-        }
-        if (exclusive && holders.size() > 0) {
-            return STILL_WAITING;
-        } else {
-            holders.add(holder);
-            return COMPLETE_SUCCESS;
-        }
+    int numItemsInTree() {
+      int total = 0;
+      for (Iterator<INode> it = getChildIterator(); it != null && 
+             it.hasNext(); ) {
+        total += it.next().numItemsInTree();
+      }
+      return total + 1;
     }
 
     /**
      */
-    public int releaseLock(UTF8 src, UTF8 holder) {
-        TreeSet holders = (TreeSet) activeLocks.get(src);
-        if (holders != null && holders.contains(holder)) {
-            holders.remove(holder);
-            if (holders.size() == 0) {
-                activeLocks.remove(src);
-            }
-            return COMPLETE_SUCCESS;
-        } else {
-            return OPERATION_FAILED;
-        }
+    String computeName() {
+      if (parent != null) {
+        return parent.computeName() + "/" + name;
+      } else {
+        return name;
+      }
     }
 
     /**
-     * Get a listing of files given path 'src'
-     *
-     * This function is admittedly very inefficient right now.  We'll
-     * make it better later.
      */
-    public DFSFileInfo[] getListing(UTF8 src) {
-        String srcs = normalizePath(src);
-
-        synchronized (rootDir) {
-            INode targetNode = rootDir.getNode(srcs);
-            if (targetNode == null) {
-                return null;
-            } else {
-                Vector contents = new Vector();
-                targetNode.listContents(contents);
-
-                DFSFileInfo listing[] = new DFSFileInfo[contents.size()];
-                int i = 0;
-                for (Iterator it = contents.iterator(); it.hasNext(); i++) {
-                    listing[i] = new DFSFileInfo( (INode) it.next() );
-                }
-                return listing;
-            }
+    long computeFileLength() {
+      long total = 0;
+      if (blocks != null) {
+        for (int i = 0; i < blocks.length; i++) {
+          total += blocks[i].getNumBytes();
         }
+      }
+      return total;
     }
 
     /**
-     * Get the blocks associated with the file
-     */
-    public Block[] getFile(UTF8 src) {
-        waitForReady();
-        synchronized (rootDir) {
-            INode targetNode = rootDir.getNode(src.toString());
-            if (targetNode == null) {
-                return null;
-            } else {
-                return targetNode.blocks;
-            }
-        }
-    }
-
-    /** 
-     * Check whether the filepath could be created
      */
-    public boolean isValidToCreate(UTF8 src) {
-        String srcs = normalizePath(src);
-        synchronized (rootDir) {
-            if (srcs.startsWith("/") && 
-                ! srcs.endsWith("/") && 
-                rootDir.getNode(srcs) == null) {
-                return true;
-            } else {
-                return false;
-            }
-        }
+    long computeContentsLength() {
+      long total = computeFileLength();
+      for (Iterator<INode> it = getChildIterator(); it != null && 
+             it.hasNext(); ) {
+        total += it.next().computeContentsLength();
+      }
+      return total;
     }
 
     /**
-     * Check whether the path specifies a directory
+     * Get the block size of the first block
+     * @return the number of bytes
      */
-    public boolean isDir(UTF8 src) {
-        synchronized (rootDir) {
-            INode node = rootDir.getNode(normalizePath(src));
-            return node != null && node.isDir();
-        }
+    public long getBlockSize() {
+      if (blocks == null || blocks.length == 0) {
+        return 0;
+      } else {
+        return blocks[0].getNumBytes();
+      }
     }
-
+        
     /**
-     * Create directory entries for every item
      */
-    boolean mkdirs(String src) {
-        src = normalizePath(new UTF8(src));
-
-        // Use this to collect all the dirs we need to construct
-        Vector v = new Vector();
+    void listContents(Vector v) {
+      if (parent != null && blocks != null) {
+        v.add(this);
+      }
 
-        // The dir itself
-        v.add(src);
+      for (Iterator<INode> it = getChildIterator(); it != null && 
+             it.hasNext(); ) {
+        v.add(it.next());
+      }
+    }
+  }
+
+  FSNamesystem namesystem = null;
+  INode rootDir = new INode("");
+  TreeMap activeLocks = new TreeMap();
+  FSImage fsImage;  
+  boolean ready = false;
+  // Metrics record
+  private MetricsRecord directoryMetrics = null;
+    
+  /** Access an existing dfs name directory. */
+  public FSDirectory(FSNamesystem ns) throws IOException {
+    this.fsImage = new FSImage();
+    namesystem = ns;
+    initialize();
+  }
+
+  public FSDirectory(FSImage fsImage, FSNamesystem ns) throws IOException {
+    this.fsImage = fsImage;
+    namesystem = ns;
+    initialize();
+  }
+    
+  private void initialize() {
+    MetricsContext metricsContext = MetricsUtil.getContext("dfs");
+    directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
+  }
+
+  void loadFSImage( Collection<File> dataDirs,
+                    StartupOption startOpt ) throws IOException {
+    // format before starting up if requested
+    if( startOpt == StartupOption.FORMAT ) {
+      fsImage.setStorageDirectories( dataDirs );
+      fsImage.format();
+      startOpt = StartupOption.REGULAR;
+    }
+    try {
+      fsImage.recoverTransitionRead( dataDirs, startOpt );
+    } catch( IOException e ) {
+      fsImage.close();
+      throw e;
+    }
+    synchronized (this) {
+      this.ready = true;
+      this.notifyAll();
+    }
+  }
 
-        // All its parents
-        Path parent = new Path(src).getParent();
-        while (parent != null) {
-            v.add(parent.toString());
-            parent = parent.getParent();
+  private void incrDeletedFileCount() {
+    directoryMetrics.incrMetric("files_deleted", 1);
+    directoryMetrics.update();
+  }
+    
+  /**
+   * Shutdown the filestore
+   */
+  public void close() throws IOException {
+    fsImage.close();
+  }
+
+  /**
+   * Block until the object is ready to be used.
+   */
+  void waitForReady() {
+    if (! ready) {
+      synchronized (this) {
+        while (!ready) {
+          try {
+            this.wait(5000);
+          } catch (InterruptedException ie) {
+          }
+        }
+      }
+    }
+  }
+
+  /**
+   * Add the given filename to the fs.
+   */
+  public boolean addFile(UTF8 path, Block[] blocks, short replication) {
+    waitForReady();
+
+    // Always do an implicit mkdirs for parent directory tree
+    String pathString = path.toString();
+    if( ! mkdirs(new Path(pathString).getParent().toString()) ) {
+      return false;
+    }
+    INode newNode = new INode( new File(pathString).getName(), blocks, replication);
+    if( ! unprotectedAddFile(path, newNode) ) {
+      NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
+                                   +"failed to add "+path+" with "
+                                   +blocks.length+" blocks to the file system" );
+      return false;
+    }
+    // add create file record to log
+    fsImage.getEditLog().logCreateFile( newNode );
+    NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
+                                  +path+" with "+blocks.length+" blocks is added to the file system" );
+    return true;
+  }
+    
+  /**
+   */
+  boolean unprotectedAddFile(UTF8 path, INode newNode) {
+    synchronized (rootDir) {
+      try {
+        if( rootDir.addNode(path.toString(), newNode ) != null ) {
+          int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
+          // Add file->block mapping
+          for (int i = 0; i < nrBlocks; i++)
+            namesystem.blocksMap.addINode(newNode.blocks[i], newNode);
+          return true;
+        } else {
+          return false;
         }
+      } catch (FileNotFoundException e ) {
+        return false;
+      }
+    }
+  }
+    
+  boolean unprotectedAddFile(UTF8 path, Block[] blocks, short replication ) {
+    return unprotectedAddFile( path,  
+                               new INode( path.toString(), blocks, replication ));
+  }
+
+  /**
+   * Change the filename
+   */
+  public boolean renameTo(UTF8 src, UTF8 dst) {
+    NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
+                                  +src+" to "+dst );
+    waitForReady();
+    if( ! unprotectedRenameTo(src, dst) )
+      return false;
+    fsImage.getEditLog().logRename(src, dst);
+    return true;
+  }
+
+  /**
+   */
+  boolean unprotectedRenameTo(UTF8 src, UTF8 dst) {
+    synchronized(rootDir) {
+      String srcStr = src.toString();
+      String dstStr = dst.toString();
+      INode renamedNode = rootDir.getNode(srcStr);
+      if (renamedNode == null) {
+        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+                                     +"failed to rename "+src+" to "+dst+ " because source does not exist" );
+        return false;
+      }
+      if (isDir(dst)) {
+        dstStr += "/" + new File(srcStr).getName();
+      }
+      if( rootDir.getNode(dstStr.toString()) != null ) {
+        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+                                     +"failed to rename "+src+" to "+dstStr+ " because destination exists" );
+        return false;
+      }
+      renamedNode.removeNode();
+            
+      // the renamed node can be reused now
+      try {
+        if( rootDir.addNode(dstStr, renamedNode ) != null ) {
+          NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
+                                        +src+" is renamed to "+dst );
+          return true;
+        }
+      } catch (FileNotFoundException e ) {
+        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
+                                     +"failed to rename "+src+" to "+dst );
+        try {
+          rootDir.addNode(srcStr, renamedNode); // put it back
+        }catch(FileNotFoundException e2) {                
+        }
+      }
 
-        // Now go backwards through list of dirs, creating along
-        // the way
-        int numElts = v.size();
-        for (int i = numElts - 1; i >= 0; i--) {
-            String cur = (String) v.elementAt(i);
-            try {
-               INode inserted = unprotectedMkdir(cur);
-               if (inserted != null) {
-                   NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
-                        +"created directory "+cur );
-                   fsImage.getEditLog().logMkDir( inserted );
-               } else { // otherwise cur exists, verify that it is a directory
-                 if (!isDir(new UTF8(cur))) {
-                   NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
-                        +"path " + cur + " is not a directory ");
-                   return false;
-                 } 
-               }
-            } catch (FileNotFoundException e ) {
-                NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
-                        +"failed to create directory "+src);
-                return false;
-            }
+      return false;
+    }
+  }
+
+  /**
+   * Set file replication
+   * 
+   * @param src file name
+   * @param replication new replication
+   * @param oldReplication old replication - output parameter
+   * @return array of file blocks
+   * @throws IOException
+   */
+  Block[] setReplication( String src, 
+                          short replication,
+                          Vector oldReplication
+                          ) throws IOException {
+    waitForReady();
+    Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication );
+    if( fileBlocks != null )  // log replication change
+      fsImage.getEditLog().logSetReplication( src, replication );
+    return fileBlocks;
+  }
+
+  Block[] unprotectedSetReplication(  String src, 
+                                      short replication,
+                                      Vector oldReplication
+                                      ) throws IOException {
+    if( oldReplication == null )
+      oldReplication = new Vector();
+    oldReplication.setSize(1);
+    oldReplication.set( 0, new Integer(-1) );
+    Block[] fileBlocks = null;
+    synchronized(rootDir) {
+      INode fileNode = rootDir.getNode(src);
+      if (fileNode == null)
+        return null;
+      if( fileNode.isDir() )
+        return null;
+      oldReplication.set( 0, new Integer( fileNode.blockReplication ));
+      fileNode.blockReplication = replication;
+      fileBlocks = fileNode.blocks;
+    }
+    return fileBlocks;
+  }
+
+  /**
+   * Get the blocksize of a file
+   * @param filename the filename
+   * @return the number of bytes in the first block
+   * @throws IOException if it is a directory or does not exist.
+   */
+  public long getBlockSize(String filename) throws IOException {
+    synchronized (rootDir) {
+      INode fileNode = rootDir.getNode(filename);
+      if (fileNode == null) {
+        throw new IOException("Unknown file: " + filename);
+      }
+      if (fileNode.isDir()) {
+        throw new IOException("Getting block size of a directory: " + 
+                              filename);
+      }
+      return fileNode.getBlockSize();
+    }
+  }
+    
+  /**
+   * Remove the file from management, return blocks
+   */
+  public Block[] delete(UTF8 src) {
+    NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "
+                                  +src );
+    waitForReady();
+    Block[] blocks = unprotectedDelete(src); 
+    if( blocks != null )
+      fsImage.getEditLog().logDelete( src );
+    return blocks;
+  }
+
+  /**
+   */
+  Block[] unprotectedDelete(UTF8 src) {
+    synchronized (rootDir) {
+      INode targetNode = rootDir.getNode(src.toString());
+      if (targetNode == null) {
+        NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
+                                     +"failed to remove "+src+" because it does not exist" );
+        return null;
+      } else {
+        //
+        // Remove the node from the namespace and GC all
+        // the blocks underneath the node.
+        //
+        if (! targetNode.removeNode()) {
+          NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
+                                       +"failed to remove "+src+" because it does not have a parent" );
+          return null;
+        } else {
+          NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
+                                        +src+" is removed" );
+          Vector v = new Vector();
+          targetNode.collectSubtreeBlocks(v);
+          for (Iterator it = v.iterator(); it.hasNext(); ) {
+            Block b = (Block) it.next();
+            namesystem.blocksMap.removeINode(b);
+          }
+          return (Block[]) v.toArray(new Block[v.size()]);
         }
+      }
+    }
+  }
+
+  /**
+   */
+  public int obtainLock(UTF8 src, UTF8 holder, boolean exclusive) {
+    TreeSet holders = (TreeSet) activeLocks.get(src);
+    if (holders == null) {
+      holders = new TreeSet();
+      activeLocks.put(src, holders);
+    }
+    if (exclusive && holders.size() > 0) {
+      return STILL_WAITING;
+    } else {
+      holders.add(holder);
+      return COMPLETE_SUCCESS;
+    }
+  }
+
+  /**
+   */
+  public int releaseLock(UTF8 src, UTF8 holder) {
+    TreeSet holders = (TreeSet) activeLocks.get(src);
+    if (holders != null && holders.contains(holder)) {
+      holders.remove(holder);
+      if (holders.size() == 0) {
+        activeLocks.remove(src);
+      }
+      return COMPLETE_SUCCESS;
+    } else {
+      return OPERATION_FAILED;
+    }
+  }
+
+  /**
+   * Get a listing of files given path 'src'
+   *
+   * This function is admittedly very inefficient right now.  We'll
+   * make it better later.
+   */
+  public DFSFileInfo[] getListing(UTF8 src) {
+    String srcs = normalizePath(src);
+
+    synchronized (rootDir) {
+      INode targetNode = rootDir.getNode(srcs);
+      if (targetNode == null) {
+        return null;
+      } else {
+        Vector contents = new Vector();
+        targetNode.listContents(contents);
+
+        DFSFileInfo listing[] = new DFSFileInfo[contents.size()];
+        int i = 0;
+        for (Iterator it = contents.iterator(); it.hasNext(); i++) {
+          listing[i] = new DFSFileInfo( (INode) it.next() );
+        }
+        return listing;
+      }
+    }
+  }
+
+  /**
+   * Get the blocks associated with the file
+   */
+  public Block[] getFile(UTF8 src) {
+    waitForReady();
+    synchronized (rootDir) {
+      INode targetNode = rootDir.getNode(src.toString());
+      if (targetNode == null) {
+        return null;
+      } else {
+        return targetNode.blocks;
+      }
+    }
+  }
+
+  /** 
+   * Check whether the filepath could be created
+   */
+  public boolean isValidToCreate(UTF8 src) {
+    String srcs = normalizePath(src);
+    synchronized (rootDir) {
+      if (srcs.startsWith("/") && 
+          ! srcs.endsWith("/") && 
+          rootDir.getNode(srcs) == null) {
         return true;
+      } else {
+        return false;
+      }
     }
+  }
+
+  /**
+   * Check whether the path specifies a directory
+   */
+  public boolean isDir(UTF8 src) {
+    synchronized (rootDir) {
+      INode node = rootDir.getNode(normalizePath(src));
+      return node != null && node.isDir();
+    }
+  }
 
-    /**
-     */
-    INode unprotectedMkdir(String src) throws FileNotFoundException {
-        synchronized (rootDir) {
-            return rootDir.addNode(src, new INode(new File(src).getName()));
-        }
+  /**
+   * Create directory entries for every item
+   */
+  boolean mkdirs(String src) {
+    src = normalizePath(new UTF8(src));
+
+    // Use this to collect all the dirs we need to construct
+    Vector v = new Vector();
+
+    // The dir itself
+    v.add(src);
+
+    // All its parents
+    Path parent = new Path(src).getParent();
+    while (parent != null) {
+      v.add(parent.toString());
+      parent = parent.getParent();
     }
 
-    /**
-     */
-    String normalizePath(UTF8 src) {
-        String srcs = src.toString();
-        if (srcs.length() > 1 && srcs.endsWith("/")) {
-            srcs = srcs.substring(0, srcs.length() - 1);
+    // Now go backwards through list of dirs, creating along
+    // the way
+    int numElts = v.size();
+    for (int i = numElts - 1; i >= 0; i--) {
+      String cur = (String) v.elementAt(i);
+      try {
+        INode inserted = unprotectedMkdir(cur);
+        if (inserted != null) {
+          NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
+                                        +"created directory "+cur );
+          fsImage.getEditLog().logMkDir( inserted );
+        } else { // otherwise cur exists, verify that it is a directory
+          if (!isDir(new UTF8(cur))) {
+            NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
+                                          +"path " + cur + " is not a directory ");
+            return false;
+          } 
         }
-        return srcs;
+      } catch (FileNotFoundException e ) {
+        NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
+                                      +"failed to create directory "+src);
+        return false;
+      }
+    }
+    return true;
+  }
+
+  /**
+   */
+  INode unprotectedMkdir(String src) throws FileNotFoundException {
+    synchronized (rootDir) {
+      return rootDir.addNode(src, new INode(new File(src).getName()));
+    }
+  }
+
+  /**
+   */
+  String normalizePath(UTF8 src) {
+    String srcs = src.toString();
+    if (srcs.length() > 1 && srcs.endsWith("/")) {
+      srcs = srcs.substring(0, srcs.length() - 1);
     }
+    return srcs;
+  }
 }
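
For orientation, the namespace lookup in INode.getNode above works by splitting an absolute path into components and then walking the children map one level per component. Below is a minimal standalone sketch of that splitting step; the class and method names are illustrative only and are not part of this commit or of FSDirectory:

import java.util.ArrayList;
import java.util.List;

class PathSplitDemo {
  // Splits "/a/b/c" into ["", "a", "b", "c"], mirroring the indexOf('/') loop
  // in INode.getNode. The leading empty component matches the root INode,
  // whose local name is the empty string.
  static List<String> split(String target) {
    List<String> components = new ArrayList<String>();
    int start = 0;
    int slashid;
    while (start < target.length() && (slashid = target.indexOf('/', start)) >= 0) {
      components.add(target.substring(start, slashid));
      start = slashid + 1;
    }
    if (start < target.length()) {
      components.add(target.substring(start));
    }
    return components;
  }

  public static void main(String[] args) {
    System.out.println(split("/user/hadoop/file.txt")); // [, user, hadoop, file.txt]
  }
}

In the class itself the components end up in a Vector and getNode(Vector, int) recurses with an index, but the splitting logic is the same.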

+ 27 - 27
src/java/org/apache/hadoop/dfs/FSEditLog.java

@@ -155,19 +155,19 @@ class FSEditLog {
    * remain, then raise an exception that will possibly cause the
    * server to exit
    */
-   void processIOError(int index) throws IOException {
-     if (editStreams == null || editStreams.size() == 1) {
-       throw new IOException("Checkpoint directories inaccessible.");
-     }
-     assert(index < getNumStorageDirs());
-     assert(getNumStorageDirs() == editStreams.size());
+  void processIOError(int index) throws IOException {
+    if (editStreams == null || editStreams.size() == 1) {
+      throw new IOException("Checkpoint directories inaccessible.");
+    }
+    assert(index < getNumStorageDirs());
+    assert(getNumStorageDirs() == editStreams.size());
 
-     editStreams.remove( index );
-     //
-     // Invoke the ioerror routine of the fsimage
-     //
-     fsimage.processIOError(index);
-   }
+    editStreams.remove( index );
+    //
+    // Invoke the ioerror routine of the fsimage
+    //
+    fsimage.processIOError(index);
+  }
 
   /**
    * check if ANY edits.new log exists
@@ -194,8 +194,8 @@ class FSEditLog {
     
     if (edits != null) {
       DataInputStream in = new DataInputStream(
-          new BufferedInputStream(
-              new FileInputStream(edits)));
+                                               new BufferedInputStream(
+                                                                       new FileInputStream(edits)));
       // Read log file version. Could be missing. 
       in.mark( 4 );
       // If edits log is greater than 2G, available method will return negative
@@ -214,10 +214,10 @@ class FSEditLog {
           logVersion = in.readInt();
         if( logVersion < FSConstants.LAYOUT_VERSION ) // future version
           throw new IOException(
-              "Unexpected version of the file system log file: "
-              + logVersion
-              + ". Current version = " 
-              + FSConstants.LAYOUT_VERSION + "." );
+                                "Unexpected version of the file system log file: "
+                                + logVersion
+                                + ". Current version = " 
+                                + FSConstants.LAYOUT_VERSION + "." );
       }
       
       short replication = fsNamesys.getDefaultReplication();
@@ -245,10 +245,10 @@ class FSEditLog {
               writables = aw.get(); 
               if( writables.length != 2 )
                 throw new IOException("Incorrect data fortmat. " 
-                    + "Name & replication pair expected");
+                                      + "Name & replication pair expected");
               name = (UTF8) writables[0];
               replication = Short.parseShort(
-                  ((UTF8)writables[1]).toString());
+                                             ((UTF8)writables[1]).toString());
               replication = adjustReplication( replication );
             }
             // get blocks
@@ -268,8 +268,8 @@ class FSEditLog {
             repl.readFields(in);
             replication = adjustReplication( fromLogReplication(repl) );
             fsDir.unprotectedSetReplication(src.toString(), 
-                replication,
-                null);
+                                            replication,
+                                            null);
             break;
           } 
           case OP_RENAME: {
@@ -295,7 +295,7 @@ class FSEditLog {
           case OP_DATANODE_ADD: {
             if( logVersion > -3 )
               throw new IOException("Unexpected opcode " + opcode 
-                  + " for version " + logVersion );
+                                    + " for version " + logVersion );
             FSImage.DatanodeImage nodeimage = new FSImage.DatanodeImage();
             nodeimage.readFields(in);
             DatanodeDescriptor node = nodeimage.getDatanodeDescriptor();
@@ -305,7 +305,7 @@ class FSEditLog {
           case OP_DATANODE_REMOVE: {
             if( logVersion > -3 )
               throw new IOException("Unexpected opcode " + opcode 
-                  + " for version " + logVersion );
+                                    + " for version " + logVersion );
             DatanodeID nodeID = new DatanodeID();
             nodeID.readFields(in);
             DatanodeDescriptor node = fsNamesys.getDatanode( nodeID );
@@ -379,8 +379,8 @@ class FSEditLog {
    */
   void logCreateFile( FSDirectory.INode newNode ) {
     UTF8 nameReplicationPair[] = new UTF8[] { 
-                        new UTF8( newNode.computeName() ), 
-                        FSEditLog.toLogReplication( newNode.getReplication() )};
+      new UTF8( newNode.computeName() ), 
+      FSEditLog.toLogReplication( newNode.getReplication() )};
     logEdit(OP_ADD,
             new ArrayWritable( UTF8.class, nameReplicationPair ), 
             new ArrayWritable( Block.class, newNode.getBlocks() ));
@@ -524,6 +524,6 @@ class FSEditLog {
    * Return the name of the edit file
    */
   File getFsEditName() throws IOException {
-      return getEditFile( 0 );
+    return getEditFile( 0 );
   }
 }
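
The loadFSEdits hunks above re-indent the edit-log replay loop: an optional negative layout-version marker at the head of the file, followed by opcode-framed records (OP_ADD, OP_SET_REPLICATION, OP_RENAME, and so on). A rough sketch of that read pattern follows; the opcode values are placeholders, record bodies are elided, and the layout described is an assumption drawn from the hunks, not the exact on-disk format:

import java.io.BufferedInputStream;
import java.io.DataInputStream;
import java.io.EOFException;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

class EditLogReadSketch {
  // Placeholder opcode values; the real constants live in FSEditLog.
  static final byte OP_ADD = 0;
  static final byte OP_RENAME = 1;

  static void replay(File edits) throws IOException {
    DataInputStream in = new DataInputStream(
        new BufferedInputStream(new FileInputStream(edits)));
    try {
      // Peek at the first byte: old logs start directly with an opcode (>= 0),
      // newer logs start with a negative layout version.
      in.mark(4);
      int logVersion = 0;
      byte first = in.readByte();
      in.reset();
      if (first < 0) {
        logVersion = in.readInt();
      }
      while (true) {
        byte opcode;
        try {
          opcode = in.readByte();
        } catch (EOFException eof) {
          break;                               // end of log reached
        }
        switch (opcode) {
        case OP_ADD:    /* read path, replication and blocks here */ break;
        case OP_RENAME: /* read source and destination here */       break;
        default:
          throw new IOException("Unknown opcode " + opcode
                                + " (log version " + logVersion + ")");
        }
      }
    } finally {
      in.close();
    }
  }
}

The real loader also adjusts replication values and gates some opcodes on the log version, which this sketch leaves out.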

+ 33 - 33
src/java/org/apache/hadoop/dfs/FSImage.java

@@ -131,14 +131,14 @@ class FSImage extends Storage {
    */
   void recoverTransitionRead( Collection<File> dataDirs,
                               StartupOption startOpt
-                            ) throws IOException {
+                              ) throws IOException {
     assert startOpt != StartupOption.FORMAT : 
       "NameNode formatting should be performed before reading the image";
     // 1. For each data directory calculate its state and 
     // check whether all is consistent before transitioning.
     this.storageDirs = new ArrayList<StorageDirectory>( dataDirs.size() );
     AbstractList<StorageState> dataDirStates = 
-                                new ArrayList<StorageState>( dataDirs.size() );
+      new ArrayList<StorageState>( dataDirs.size() );
     boolean isFormatted = false;
     for( Iterator<File> it = dataDirs.iterator(); it.hasNext(); ) {
       File dataDir = it.next();
@@ -151,7 +151,7 @@ class FSImage extends Storage {
         case NON_EXISTENT:
           // name-node fails if any of the configured storage dirs are missing
           throw new InconsistentFSStateException( sd.root,
-              "storage directory does not exist or is not accessible." );
+                                                  "storage directory does not exist or is not accessible." );
         case NOT_FORMATTED:
           break;
         case CONVERT:
@@ -179,16 +179,16 @@ class FSImage extends Storage {
 
     if( dataDirs.size() == 0 )  // none of the data dirs exist
       throw new IOException( 
-          "All specified directories are not accessible or do not exist." );
+                            "All specified directories are not accessible or do not exist." );
     if( ! isFormatted && startOpt != StartupOption.ROLLBACK )
       throw new IOException( "NameNode is not formatted." );
     if( startOpt != StartupOption.UPGRADE
         && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION
         && layoutVersion != FSConstants.LAYOUT_VERSION )
       throw new IOException( 
-          "\nFile system image contains an old layout version " + layoutVersion
-          + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
-          + " is required.\nPlease restart NameNode with -upgrade option." );
+                            "\nFile system image contains an old layout version " + layoutVersion
+                            + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
+                            + " is required.\nPlease restart NameNode with -upgrade option." );
 
     // 2. Format unformatted dirs.
     this.checkpointTime = 0L;
@@ -233,8 +233,8 @@ class FSImage extends Storage {
       StorageDirectory sd = getStorageDir( idx );
       if( sd.getPreviousDir().exists() )
         throw new InconsistentFSStateException( sd.root,
-          "previous fs state should not exist during upgrade. "
-            + "Finalize or rollback first." );
+                                                "previous fs state should not exist during upgrade. "
+                                                + "Finalize or rollback first." );
     }
 
     // load the latest image
@@ -249,10 +249,10 @@ class FSImage extends Storage {
     for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
       StorageDirectory sd = getStorageDir( idx );
       LOG.info( "Upgrading image directory " + sd.root 
-              + ".\n   old LV = " + oldLV
-              + "; old CTime = " + oldCTime
-              + ".\n   new LV = " + this.getLayoutVersion()
-              + "; new CTime = " + this.getCTime() );
+                + ".\n   old LV = " + oldLV
+                + "; old CTime = " + oldCTime
+                + ".\n   new LV = " + this.getLayoutVersion()
+                + "; new CTime = " + this.getCTime() );
       File curDir = sd.getCurrentDir();
       File prevDir = sd.getPreviousDir();
       File tmpDir = sd.getPreviousTmp();
@@ -288,7 +288,7 @@ class FSImage extends Storage {
       File prevDir = sd.getPreviousDir();
       if( ! prevDir.exists() ) {  // use current directory then
         LOG.info( "Storage directory " + sd.root
-                + " does not contain previous fs state." );
+                  + " does not contain previous fs state." );
         sd.read(); // read and verify consistency with other directories
         continue;
       }
@@ -298,7 +298,7 @@ class FSImage extends Storage {
     }
     if( ! canRollback )
       throw new IOException( "Cannot rollback. " 
-            + "None of the storage directories contain previous fs state." );
+                             + "None of the storage directories contain previous fs state." );
 
     // Now that we know all directories are going to be consistent
     // Do rollback for each directory containing previous state
@@ -309,8 +309,8 @@ class FSImage extends Storage {
         continue;
 
       LOG.info( "Rolling back storage directory " + sd.root 
-          + ".\n   new LV = " + prevState.getLayoutVersion()
-          + "; new CTime = " + prevState.getCTime() );
+                + ".\n   new LV = " + prevState.getLayoutVersion()
+                + "; new CTime = " + prevState.getCTime() );
       File tmpDir = sd.getRemovedTmp();
       assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
       // rename current to tmp
@@ -332,9 +332,9 @@ class FSImage extends Storage {
     if( ! prevDir.exists() )
       return; // already discarded
     LOG.info( "Finalizing upgrade for storage directory " 
-            + sd.root 
-            + ".\n   cur LV = " + this.getLayoutVersion()
-            + "; cur CTime = " + this.getCTime() );
+              + sd.root 
+              + ".\n   cur LV = " + this.getLayoutVersion()
+              + "; cur CTime = " + this.getCTime() );
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();
     // rename previous to tmp and remove
@@ -355,7 +355,7 @@ class FSImage extends Storage {
 
   protected void getFields( Properties props, 
                             StorageDirectory sd 
-                          ) throws IOException {
+                            ) throws IOException {
     super.getFields( props, sd );
     if( layoutVersion == 0 )
       throw new IOException("NameNode directory " 
@@ -389,7 +389,7 @@ class FSImage extends Storage {
    */
   protected void setFields( Properties props, 
                             StorageDirectory sd 
-                          ) throws IOException {
+                            ) throws IOException {
     super.setFields( props, sd );
     writeCheckpointTime( sd );
   }
@@ -406,7 +406,7 @@ class FSImage extends Storage {
     File timeFile = getImageFile( sd, NameNodeFile.TIME );
     if (timeFile.exists()) { timeFile.delete(); }
     DataOutputStream out = new DataOutputStream(
-          new FileOutputStream(timeFile));
+                                                new FileOutputStream(timeFile));
     try {
       out.writeLong( checkpointTime );
     } finally {
@@ -439,10 +439,10 @@ class FSImage extends Storage {
     // check consistency of the old storage
     if( ! oldImageDir.isDirectory() )
       throw new InconsistentFSStateException( sd.root,
-          oldImageDir + " is not a directory." );
+                                              oldImageDir + " is not a directory." );
     if( ! oldImageDir.canWrite() )
       throw new InconsistentFSStateException( sd.root,
-          oldImageDir + " is not writable." );
+                                              oldImageDir + " is not writable." );
     return true;
   }
   
@@ -454,8 +454,8 @@ class FSImage extends Storage {
     File oldImage = new File( oldImageDir, "fsimage" );
     
     LOG.info( "Old layout version directory " + oldImageDir
-            + " is found. New layout version is "
-            + FSConstants.LAYOUT_VERSION );
+              + " is found. New layout version is "
+              + FSConstants.LAYOUT_VERSION );
     LOG.info( "Trying to convert ..." );
 
     // we did not use locking for the pre upgrade layout, so we cannot prevent 
@@ -603,8 +603,8 @@ class FSImage extends Storage {
     boolean needToSave = true;
     int imgVersion = this.getLayoutVersion();
     DataInputStream in = new DataInputStream(
-                            new BufferedInputStream(
-                                new FileInputStream(curFile)));
+                                             new BufferedInputStream(
+                                                                     new FileInputStream(curFile)));
     try {
       /*
        * TODO we need to change format of the image file
@@ -685,8 +685,8 @@ class FSImage extends Storage {
     // Write out data
     //
     DataOutputStream out = new DataOutputStream(
-          new BufferedOutputStream(
-          new FileOutputStream(newFile)));
+                                                new BufferedOutputStream(
+                                                                         new FileOutputStream(newFile)));
     try {
       out.writeInt(FSConstants.LAYOUT_VERSION);
       out.writeInt(namespaceID);
@@ -745,7 +745,7 @@ class FSImage extends Storage {
       sd.unlock();
     }
     LOG.info( "Storage directory " + sd.root 
-        + " has been successfully formatted." );
+              + " has been successfully formatted." );
   }
 
   public void format() throws IOException {
@@ -780,7 +780,7 @@ class FSImage extends Storage {
       }
     }
     for(Iterator<INode> it = root.getChildIterator(); it != null &&
-                                                      it.hasNext(); ) {
+          it.hasNext(); ) {
       saveImage( fullName, it.next(), out );
     }
   }
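
The FSImage hunks above touch the upgrade, rollback, and finalize paths, all of which work by renaming whole storage subdirectories (current, previous, and *.tmp staging names). A condensed sketch of the upgrade step under that naming scheme follows; error handling and the actual image write are omitted, so treat it as an outline rather than the real implementation:

import java.io.File;
import java.io.IOException;

class UpgradeSketch {
  // current/ -> previous.tmp/, write a fresh current/, then previous.tmp/ -> previous/
  static void doUpgrade(File storageRoot) throws IOException {
    File curDir = new File(storageRoot, "current");
    File prevDir = new File(storageRoot, "previous");
    File tmpDir = new File(storageRoot, "previous.tmp");

    if (prevDir.exists()) {
      throw new IOException("previous fs state should not exist during upgrade");
    }
    if (!curDir.renameTo(tmpDir)) {        // preserve the old state first
      throw new IOException("Cannot rename " + curDir + " to " + tmpDir);
    }
    if (!curDir.mkdir()) {                 // new image goes into a fresh current/
      throw new IOException("Cannot create " + curDir);
    }
    // ... save the new fsimage and edits into curDir here ...
    if (!tmpDir.renameTo(prevDir)) {       // commit: old state becomes previous/
      throw new IOException("Cannot rename " + tmpDir + " to " + prevDir);
    }
  }
}

Directory renames are effectively atomic on most local filesystems, which is why the old state stays recoverable in previous/ until the new image has been written, and why rollback and finalize can be expressed as the corresponding reverse or cleanup renames.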

File diff not shown because it is too large
+ 1710 - 1710
src/java/org/apache/hadoop/dfs/FSNamesystem.java


+ 1 - 1
src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java

@@ -31,7 +31,7 @@ class InconsistentFSStateException extends IOException {
 
   public InconsistentFSStateException( File dir, String descr ) {
     super( "Directory " + getFilePath( dir )
-          + " is in an inconsistent state: " + descr );
+           + " is in an inconsistent state: " + descr );
   }
 
   public InconsistentFSStateException( File dir, String descr, Throwable ex ) {

+ 2 - 2
src/java/org/apache/hadoop/dfs/IncorrectVersionException.java

@@ -35,8 +35,8 @@ class IncorrectVersionException extends IOException {
                                     String ofWhat,
                                     int versionExpected ) {
     super( "Unexpected version " 
-        + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
-        + versionReported + ". Expecting = " + versionExpected + "." );
+           + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+           + versionReported + ". Expecting = " + versionExpected + "." );
   }
 
 }

+ 190 - 190
src/java/org/apache/hadoop/dfs/JspHelper.java

@@ -29,220 +29,220 @@ import org.apache.hadoop.io.*;
 import org.apache.hadoop.conf.*;
 
 public class JspHelper {
-    static FSNamesystem fsn = null;
-    static InetSocketAddress nameNodeAddr;
-    static Configuration conf = new Configuration();
+  static FSNamesystem fsn = null;
+  static InetSocketAddress nameNodeAddr;
+  static Configuration conf = new Configuration();
 
-    static int defaultChunkSizeToView = 
-                        conf.getInt("dfs.default.chunk.view.size",32 * 1024);
-    static Random rand = new Random();
+  static int defaultChunkSizeToView = 
+    conf.getInt("dfs.default.chunk.view.size",32 * 1024);
+  static Random rand = new Random();
 
-    public JspHelper() {
-      if (DataNode.getDataNode() != null) {
-        nameNodeAddr = DataNode.getDataNode().getNameNodeAddr();
-      }
-      else {
-        fsn = FSNamesystem.getFSNamesystem();
-        nameNodeAddr = new InetSocketAddress(fsn.getDFSNameNodeMachine(),
-                  fsn.getDFSNameNodePort()); 
-      }      
+  public JspHelper() {
+    if (DataNode.getDataNode() != null) {
+      nameNodeAddr = DataNode.getDataNode().getNameNodeAddr();
+    }
+    else {
+      fsn = FSNamesystem.getFSNamesystem();
+      nameNodeAddr = new InetSocketAddress(fsn.getDFSNameNodeMachine(),
+                                           fsn.getDFSNameNodePort()); 
+    }      
+  }
+  public DatanodeInfo bestNode(LocatedBlock blk) throws IOException {
+    TreeSet deadNodes = new TreeSet();
+    DatanodeInfo chosenNode = null;
+    int failures = 0;
+    Socket s = null;
+    DatanodeInfo [] nodes = blk.getLocations();
+    if (nodes == null || nodes.length == 0) {
+      throw new IOException("No nodes contain this block");
     }
-    public DatanodeInfo bestNode(LocatedBlock blk) throws IOException {
-      TreeSet deadNodes = new TreeSet();
-      DatanodeInfo chosenNode = null;
-      int failures = 0;
-      Socket s = null;
-      DatanodeInfo [] nodes = blk.getLocations();
-      if (nodes == null || nodes.length == 0) {
-        throw new IOException("No nodes contain this block");
+    while (s == null) {
+      if (chosenNode == null) {
+        do {
+          chosenNode = nodes[rand.nextInt(nodes.length)];
+        } while (deadNodes.contains(chosenNode));
       }
-      while (s == null) {
-        if (chosenNode == null) {
-          do {
-            chosenNode = nodes[rand.nextInt(nodes.length)];
-          } while (deadNodes.contains(chosenNode));
-        }
-        int index = rand.nextInt(nodes.length);
-        chosenNode = nodes[index];
+      int index = rand.nextInt(nodes.length);
+      chosenNode = nodes[index];
 
-        //just ping to check whether the node is alive
-        InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getHost() + ":" + chosenNode.getInfoPort());
-        
-        try {
-          s = new Socket();
-          s.connect(targetAddr, FSConstants.READ_TIMEOUT);
-          s.setSoTimeout(FSConstants.READ_TIMEOUT);
-        } catch (IOException e) {
-          deadNodes.add(chosenNode);
-          s.close();
-          s = null;
-          failures++;
-        }
-        if (failures == nodes.length)
-          throw new IOException("Could not reach the block containing the data. Please try again");
+      //just ping to check whether the node is alive
+      InetSocketAddress targetAddr = DataNode.createSocketAddr(chosenNode.getHost() + ":" + chosenNode.getInfoPort());
         
+      try {
+        s = new Socket();
+        s.connect(targetAddr, FSConstants.READ_TIMEOUT);
+        s.setSoTimeout(FSConstants.READ_TIMEOUT);
+      } catch (IOException e) {
+        deadNodes.add(chosenNode);
+        s.close();
+        s = null;
+        failures++;
       }
-      s.close();
-      return chosenNode;
+      if (failures == nodes.length)
+        throw new IOException("Could not reach the block containing the data. Please try again");
+        
     }
-    public void streamBlockInAscii(InetSocketAddress addr, long blockId, long blockSize, 
-            long offsetIntoBlock, long chunkSizeToView, JspWriter out) 
-      throws IOException {
-      if (chunkSizeToView == 0) return;
-      Socket s = new Socket();
-      s.connect(addr, FSConstants.READ_TIMEOUT);
-      s.setSoTimeout(FSConstants.READ_TIMEOUT);
-      //
-      // Xmit header info to datanode
-      //
-      DataOutputStream os = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
-      os.write(FSConstants.OP_READSKIP_BLOCK);
-      new Block(blockId, blockSize).write(os);
-      os.writeLong(offsetIntoBlock);
-      os.flush();
+    s.close();
+    return chosenNode;
+  }
+  public void streamBlockInAscii(InetSocketAddress addr, long blockId, long blockSize, 
+                                 long offsetIntoBlock, long chunkSizeToView, JspWriter out) 
+    throws IOException {
+    if (chunkSizeToView == 0) return;
+    Socket s = new Socket();
+    s.connect(addr, FSConstants.READ_TIMEOUT);
+    s.setSoTimeout(FSConstants.READ_TIMEOUT);
+    //
+    // Xmit header info to datanode
+    //
+    DataOutputStream os = new DataOutputStream(new BufferedOutputStream(s.getOutputStream()));
+    os.write(FSConstants.OP_READSKIP_BLOCK);
+    new Block(blockId, blockSize).write(os);
+    os.writeLong(offsetIntoBlock);
+    os.flush();
 
-      //
-      // Get bytes in block, set streams
-      //
-      DataInputStream in = new DataInputStream(new BufferedInputStream(s.getInputStream()));
-      long curBlockSize = in.readLong();
-      long amtSkipped = in.readLong();
-      if (curBlockSize != blockSize) {
-        throw new IOException("Recorded block size is " + blockSize + ", but datanode reports size of " + curBlockSize);
-      }
-      if (amtSkipped != offsetIntoBlock) {
-        throw new IOException("Asked for offset of " + offsetIntoBlock + ", but only received offset of " + amtSkipped);
-      }
-      
-      long amtToRead = chunkSizeToView;
-      if (amtToRead + offsetIntoBlock > blockSize)
-        amtToRead = blockSize - offsetIntoBlock;
-      byte[] buf = new byte[(int)amtToRead];
-      int readOffset = 0;
-      int retries = 2;
-      while (true) {
-        int numRead;
-        try {
-          numRead = in.read(buf, readOffset, (int)amtToRead);
-        }
-        catch (IOException e) {
-          retries--;
-          if (retries == 0)
-            throw new IOException("Could not read data from datanode");
-          continue;
-        }
-        amtToRead -= numRead;
-        readOffset += numRead;
-        if (amtToRead == 0)
-          break;
-      }
-      s.close();
-      in.close();
-      out.print(new String(buf));
+    //
+    // Get bytes in block, set streams
+    //
+    DataInputStream in = new DataInputStream(new BufferedInputStream(s.getInputStream()));
+    long curBlockSize = in.readLong();
+    long amtSkipped = in.readLong();
+    if (curBlockSize != blockSize) {
+      throw new IOException("Recorded block size is " + blockSize + ", but datanode reports size of " + curBlockSize);
     }
-    public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live,
-                                ArrayList<DatanodeDescriptor> dead ) {
-        if ( fsn != null )
-            fsn.DFSNodesStatus(live, dead);
+    if (amtSkipped != offsetIntoBlock) {
+      throw new IOException("Asked for offset of " + offsetIntoBlock + ", but only received offset of " + amtSkipped);
     }
-    public void addTableHeader(JspWriter out) throws IOException {
-      out.print("<table border=\"1\""+
-                " cellpadding=\"2\" cellspacing=\"2\">");
-      out.print("<tbody>");
-    }
-    public void addTableRow(JspWriter out, String[] columns) throws IOException {
-      out.print("<tr>");
-      for (int i = 0; i < columns.length; i++) {
-        out.print("<td style=\"vertical-align: top;\"><B>"+columns[i]+"</B><br></td>");
+      
+    long amtToRead = chunkSizeToView;
+    if (amtToRead + offsetIntoBlock > blockSize)
+      amtToRead = blockSize - offsetIntoBlock;
+    byte[] buf = new byte[(int)amtToRead];
+    int readOffset = 0;
+    int retries = 2;
+    while (true) {
+      int numRead;
+      try {
+        numRead = in.read(buf, readOffset, (int)amtToRead);
       }
-      out.print("</tr>");
+      catch (IOException e) {
+        retries--;
+        if (retries == 0)
+          throw new IOException("Could not read data from datanode");
+        continue;
+      }
+      amtToRead -= numRead;
+      readOffset += numRead;
+      if (amtToRead == 0)
+        break;
+    }
+    s.close();
+    in.close();
+    out.print(new String(buf));
+  }
+  public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live,
+                              ArrayList<DatanodeDescriptor> dead ) {
+    if ( fsn != null )
+      fsn.DFSNodesStatus(live, dead);
+  }
+  public void addTableHeader(JspWriter out) throws IOException {
+    out.print("<table border=\"1\""+
+              " cellpadding=\"2\" cellspacing=\"2\">");
+    out.print("<tbody>");
+  }
+  public void addTableRow(JspWriter out, String[] columns) throws IOException {
+    out.print("<tr>");
+    for (int i = 0; i < columns.length; i++) {
+      out.print("<td style=\"vertical-align: top;\"><B>"+columns[i]+"</B><br></td>");
     }
-    public void addTableRow(JspWriter out, String[] columns, int row) throws IOException {
-      out.print("<tr>");
+    out.print("</tr>");
+  }
+  public void addTableRow(JspWriter out, String[] columns, int row) throws IOException {
+    out.print("<tr>");
       
-      for (int i = 0; i < columns.length; i++) {
-        if( row/2*2 == row ) {//even
-          out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
-        } else {
-          out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
+    for (int i = 0; i < columns.length; i++) {
+      if( row/2*2 == row ) {//even
+        out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
+      } else {
+        out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
           
-        }
       }
-      out.print("</tr>");
-    }
-    public void addTableFooter(JspWriter out) throws IOException {
-      out.print("</tbody></table>");
     }
+    out.print("</tr>");
+  }
+  public void addTableFooter(JspWriter out) throws IOException {
+    out.print("</tbody></table>");
+  }
 
-    public String getSafeModeText() {
-      if( ! fsn.isInSafeMode() )
-        return "";
-      return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
-    }
+  public String getSafeModeText() {
+    if( ! fsn.isInSafeMode() )
+      return "";
+    return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
+  }
     
-    public void sortNodeList(ArrayList<DatanodeDescriptor> nodes,
-                             String field, String order) {
+  public void sortNodeList(ArrayList<DatanodeDescriptor> nodes,
+                           String field, String order) {
         
-        class NodeComapare implements Comparator<DatanodeDescriptor> {
-            static final int 
-                FIELD_NAME              = 1,
-                FIELD_LAST_CONTACT      = 2,
-                FIELD_BLOCKS            = 3,
-                FIELD_SIZE              = 4,
-                FIELD_DISK_USED         = 5,
-                SORT_ORDER_ASC          = 1,
-                SORT_ORDER_DSC          = 2;
+    class NodeComapare implements Comparator<DatanodeDescriptor> {
+      static final int 
+        FIELD_NAME              = 1,
+        FIELD_LAST_CONTACT      = 2,
+        FIELD_BLOCKS            = 3,
+        FIELD_SIZE              = 4,
+        FIELD_DISK_USED         = 5,
+        SORT_ORDER_ASC          = 1,
+        SORT_ORDER_DSC          = 2;
 
-            int sortField = FIELD_NAME;
-            int sortOrder = SORT_ORDER_ASC;
+      int sortField = FIELD_NAME;
+      int sortOrder = SORT_ORDER_ASC;
             
-            public NodeComapare(String field, String order) {
-                if ( field.equals( "lastcontact" ) ) {
-                    sortField = FIELD_LAST_CONTACT;
-                } else if ( field.equals( "size" ) ) {
-                    sortField = FIELD_SIZE;
-                } else if ( field.equals( "blocks" ) ) {
-                    sortField = FIELD_BLOCKS;
-                } else if ( field.equals( "pcused" ) ) {
-                    sortField = FIELD_DISK_USED;
-                } else {
-                    sortField = FIELD_NAME;
-                }
+      public NodeComapare(String field, String order) {
+        if ( field.equals( "lastcontact" ) ) {
+          sortField = FIELD_LAST_CONTACT;
+        } else if ( field.equals( "size" ) ) {
+          sortField = FIELD_SIZE;
+        } else if ( field.equals( "blocks" ) ) {
+          sortField = FIELD_BLOCKS;
+        } else if ( field.equals( "pcused" ) ) {
+          sortField = FIELD_DISK_USED;
+        } else {
+          sortField = FIELD_NAME;
+        }
                 
-                if ( order.equals("DSC") ) {
-                    sortOrder = SORT_ORDER_DSC;
-                } else {
-                    sortOrder = SORT_ORDER_ASC;
-                }
-            }
+        if ( order.equals("DSC") ) {
+          sortOrder = SORT_ORDER_DSC;
+        } else {
+          sortOrder = SORT_ORDER_ASC;
+        }
+      }
 
-            public int compare( DatanodeDescriptor d1,
-                                DatanodeDescriptor d2 ) {
-                int ret = 0;
-                switch ( sortField ) {
-                case FIELD_LAST_CONTACT:
-                    ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
-                    break;
-                case FIELD_BLOCKS:
-                    ret = d1.numBlocks() - d2.numBlocks();
-                    break;
-                case FIELD_SIZE:
-                    long  dlong = d1.getCapacity() - d2.getCapacity();
-                    ret = (dlong < 0) ? -1 : ( (dlong > 0) ? 1 : 0 );
-                    break;
-                case FIELD_DISK_USED:
-                    double ddbl =((d2.getRemaining()*1.0/d2.getCapacity())-
-                                  (d1.getRemaining()*1.0/d1.getCapacity()));
-                    ret = (ddbl < 0) ? -1 : ( (ddbl > 0) ? 1 : 0 );
-                    break;
-                case FIELD_NAME: 
-                    ret = d1.getHostName().compareTo(d2.getHostName());
-                    break;
-                }
-                return ( sortOrder == SORT_ORDER_DSC ) ? -ret : ret;
-            }
+      public int compare( DatanodeDescriptor d1,
+                          DatanodeDescriptor d2 ) {
+        int ret = 0;
+        switch ( sortField ) {
+        case FIELD_LAST_CONTACT:
+          ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
+          break;
+        case FIELD_BLOCKS:
+          ret = d1.numBlocks() - d2.numBlocks();
+          break;
+        case FIELD_SIZE:
+          long  dlong = d1.getCapacity() - d2.getCapacity();
+          ret = (dlong < 0) ? -1 : ( (dlong > 0) ? 1 : 0 );
+          break;
+        case FIELD_DISK_USED:
+          double ddbl =((d2.getRemaining()*1.0/d2.getCapacity())-
+                        (d1.getRemaining()*1.0/d1.getCapacity()));
+          ret = (ddbl < 0) ? -1 : ( (ddbl > 0) ? 1 : 0 );
+          break;
+        case FIELD_NAME: 
+          ret = d1.getHostName().compareTo(d2.getHostName());
+          break;
         }
-        
-        Collections.sort( nodes, new NodeComapare( field, order ) );
+        return ( sortOrder == SORT_ORDER_DSC ) ? -ret : ret;
+      }
     }
+        
+    Collections.sort( nodes, new NodeComapare( field, order ) );
+  }
 }
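
The sortNodeList method above picks a sort key from the request ("lastcontact", "size", "blocks", "pcused", or the host name) and negates the comparison result for descending order. Below is a minimal sketch of the same idea using java.util.Comparator chaining; it is illustrative only, and NodeStat with its fields is a hypothetical stand-in for DatanodeDescriptor, not part of this commit.

  // Illustrative sketch only (not from the commit): field-keyed sorting with
  // Comparator chaining. NodeStat is a hypothetical stand-in type.
  import java.util.ArrayList;
  import java.util.Comparator;
  import java.util.List;

  public class NodeSortSketch {
    static class NodeStat {
      String hostName;
      long lastUpdate;
      int blocks;
      long capacity;
      long remaining;
      NodeStat(String h, long lu, int b, long c, long r) {
        hostName = h; lastUpdate = lu; blocks = b; capacity = c; remaining = r;
      }
    }

    // pick the sort key by field name, defaulting to the host name
    static Comparator<NodeStat> byField(String field) {
      switch (field) {
        case "lastcontact": return Comparator.comparingLong(n -> n.lastUpdate);
        case "blocks":      return Comparator.comparingInt(n -> n.blocks);
        case "size":        return Comparator.comparingLong(n -> n.capacity);
        case "pcused":      // used fraction of capacity, as in the diff above
          return Comparator.comparingDouble(n -> 1.0 - (double) n.remaining / n.capacity);
        default:            return Comparator.comparing(n -> n.hostName);
      }
    }

    static void sortNodes(List<NodeStat> nodes, String field, String order) {
      Comparator<NodeStat> cmp = byField(field);
      if ("DSC".equals(order)) {
        cmp = cmp.reversed();            // descending, mirroring SORT_ORDER_DSC
      }
      nodes.sort(cmp);
    }

    public static void main(String[] args) {
      List<NodeStat> nodes = new ArrayList<>();
      nodes.add(new NodeStat("dn-b", 200L, 5, 1000L, 400L));
      nodes.add(new NodeStat("dn-a", 100L, 9, 2000L, 1500L));
      sortNodes(nodes, "blocks", "DSC");
      nodes.forEach(n -> System.out.println(n.hostName + " blocks=" + n.blocks));
    }
  }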

+ 49 - 49
src/java/org/apache/hadoop/dfs/LocatedBlock.java

@@ -29,62 +29,62 @@ import java.io.*;
  ****************************************************/
 class LocatedBlock implements Writable {
 
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (LocatedBlock.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new LocatedBlock(); }
-         });
-    }
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (LocatedBlock.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new LocatedBlock(); }
+       });
+  }
 
-    Block b;
-    DatanodeInfo locs[];
+  Block b;
+  DatanodeInfo locs[];
 
-    /**
-     */
-    public LocatedBlock() {
-        this.b = new Block();
-        this.locs = new DatanodeInfo[0];
-    }
+  /**
+   */
+  public LocatedBlock() {
+    this.b = new Block();
+    this.locs = new DatanodeInfo[0];
+  }
 
-    /**
-     */
-    public LocatedBlock(Block b, DatanodeInfo[] locs) {
-        this.b = b;
-        this.locs = locs;
-    }
+  /**
+   */
+  public LocatedBlock(Block b, DatanodeInfo[] locs) {
+    this.b = b;
+    this.locs = locs;
+  }
 
-    /**
-     */
-    public Block getBlock() {
-        return b;
-    }
+  /**
+   */
+  public Block getBlock() {
+    return b;
+  }
 
-    /**
-     */
-    DatanodeInfo[] getLocations() {
-        return locs;
-    }
+  /**
+   */
+  DatanodeInfo[] getLocations() {
+    return locs;
+  }
 
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    public void write(DataOutput out) throws IOException {
-        b.write(out);
-        out.writeInt(locs.length);
-        for (int i = 0; i < locs.length; i++) {
-            locs[i].write(out);
-        }
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    b.write(out);
+    out.writeInt(locs.length);
+    for (int i = 0; i < locs.length; i++) {
+      locs[i].write(out);
     }
+  }
 
-    public void readFields(DataInput in) throws IOException {
-        this.b = new Block();
-        b.readFields(in);
-        int count = in.readInt();
-        this.locs = new DatanodeInfo[count];
-        for (int i = 0; i < locs.length; i++) {
-            locs[i] = new DatanodeInfo();
-            locs[i].readFields(in);
-        }
+  public void readFields(DataInput in) throws IOException {
+    this.b = new Block();
+    b.readFields(in);
+    int count = in.readInt();
+    this.locs = new DatanodeInfo[count];
+    for (int i = 0; i < locs.length; i++) {
+      locs[i] = new DatanodeInfo();
+      locs[i].readFields(in);
     }
+  }
 }
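
LocatedBlock above follows the Writable pattern: write() emits the fixed fields and then a length-prefixed array, and readFields() reads them back in exactly the same order. Below is a minimal stand-alone sketch of that round trip using plain JDK data streams; it is illustrative only, and BlockRecord, its fields, and the host strings are hypothetical rather than part of this commit.

  // Illustrative sketch only (not from the commit): a write/readFields round trip
  // over in-memory data streams. BlockRecord is a hypothetical stand-in type.
  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  public class WritableSketch {
    static class BlockRecord {
      long blockId;
      String[] hosts = new String[0];

      // fixed fields first, then a length-prefixed array, as in write(DataOutput) above
      void write(DataOutputStream out) throws IOException {
        out.writeLong(blockId);
        out.writeInt(hosts.length);
        for (String h : hosts) {
          out.writeUTF(h);
        }
      }

      // read everything back in the same order, as in readFields(DataInput) above
      void readFields(DataInputStream in) throws IOException {
        blockId = in.readLong();
        int count = in.readInt();
        hosts = new String[count];
        for (int i = 0; i < count; i++) {
          hosts[i] = in.readUTF();
        }
      }
    }

    public static void main(String[] args) throws IOException {
      BlockRecord original = new BlockRecord();
      original.blockId = 42L;
      original.hosts = new String[] { "datanode1:50010", "datanode2:50010" };

      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      try (DataOutputStream out = new DataOutputStream(bytes)) {
        original.write(out);
      }

      BlockRecord copy = new BlockRecord();
      try (DataInputStream in = new DataInputStream(
               new ByteArrayInputStream(bytes.toByteArray()))) {
        copy.readFields(in);
      }
      System.out.println(copy.blockId + " -> " + String.join(", ", copy.hosts));
    }
  }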

+ 697 - 697
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -69,751 +69,751 @@ import org.apache.hadoop.metrics.Updater;
  * @author Mike Cafarella
  **********************************************************/
 public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
-    public long getProtocolVersion(String protocol, 
-                                   long clientVersion) throws IOException { 
-      if (protocol.equals(ClientProtocol.class.getName())) {
-        return ClientProtocol.versionID; 
-      } else if (protocol.equals(DatanodeProtocol.class.getName())){
-        return DatanodeProtocol.versionID;
-      } else {
-        throw new IOException("Unknown protocol to name node: " + protocol);
-      }
-    }
+  public long getProtocolVersion(String protocol, 
+                                 long clientVersion) throws IOException { 
+    if (protocol.equals(ClientProtocol.class.getName())) {
+      return ClientProtocol.versionID; 
+    } else if (protocol.equals(DatanodeProtocol.class.getName())){
+      return DatanodeProtocol.versionID;
+    } else {
+      throw new IOException("Unknown protocol to name node: " + protocol);
+    }
+  }
     
-    public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
-    public static final Log stateChangeLog = LogFactory.getLog( "org.apache.hadoop.dfs.StateChange");
+  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
+  public static final Log stateChangeLog = LogFactory.getLog( "org.apache.hadoop.dfs.StateChange");
 
-    private FSNamesystem namesystem;
-    private Server server;
-    private Thread emptier;
-    private int handlerCount = 2;
+  private FSNamesystem namesystem;
+  private Server server;
+  private Thread emptier;
+  private int handlerCount = 2;
     
-    private InetSocketAddress nameNodeAddress = null;
+  private InetSocketAddress nameNodeAddress = null;
     
-    /** only used for testing purposes  */
-    private boolean stopRequested = false;
-
-    /** Format a new filesystem.  Destroys any filesystem that may already
-     * exist at this location.  **/
-    public static void format(Configuration conf) throws IOException {
-      format( conf, false );
-    }
-
-    private class NameNodeMetrics implements Updater {
-      private final MetricsRecord metricsRecord;
-      private int numFilesCreated = 0;
-      private int numFilesOpened = 0;
-      private int numFilesRenamed = 0;
-      private int numFilesListed = 0;
+  /** only used for testing purposes  */
+  private boolean stopRequested = false;
+
+  /** Format a new filesystem.  Destroys any filesystem that may already
+   * exist at this location.  **/
+  public static void format(Configuration conf) throws IOException {
+    format( conf, false );
+  }
+
+  private class NameNodeMetrics implements Updater {
+    private final MetricsRecord metricsRecord;
+    private int numFilesCreated = 0;
+    private int numFilesOpened = 0;
+    private int numFilesRenamed = 0;
+    private int numFilesListed = 0;
       
-      NameNodeMetrics() {
-        MetricsContext metricsContext = MetricsUtil.getContext("dfs");
-        metricsRecord = MetricsUtil.createRecord(metricsContext, "namenode");
-        metricsContext.registerUpdater(this);
-      }
+    NameNodeMetrics() {
+      MetricsContext metricsContext = MetricsUtil.getContext("dfs");
+      metricsRecord = MetricsUtil.createRecord(metricsContext, "namenode");
+      metricsContext.registerUpdater(this);
+    }
       
-      /**
-       * Since this object is a registered updater, this method will be called
-       * periodically, e.g. every 5 seconds.
-       */
-      public void doUpdates(MetricsContext unused) {
-        synchronized (this) {
-          metricsRecord.incrMetric("files_created", numFilesCreated);
-          metricsRecord.incrMetric("files_opened", numFilesOpened);
-          metricsRecord.incrMetric("files_renamed", numFilesRenamed);
-          metricsRecord.incrMetric("files_listed", numFilesListed);
+    /**
+     * Since this object is a registered updater, this method will be called
+     * periodically, e.g. every 5 seconds.
+     */
+    public void doUpdates(MetricsContext unused) {
+      synchronized (this) {
+        metricsRecord.incrMetric("files_created", numFilesCreated);
+        metricsRecord.incrMetric("files_opened", numFilesOpened);
+        metricsRecord.incrMetric("files_renamed", numFilesRenamed);
+        metricsRecord.incrMetric("files_listed", numFilesListed);
               
-          numFilesCreated = 0;
-          numFilesOpened = 0;
-          numFilesRenamed = 0;
-          numFilesListed = 0;
-        }
-        metricsRecord.update();
-      }
-      
-      synchronized void createFile() {
-        ++numFilesCreated;
+        numFilesCreated = 0;
+        numFilesOpened = 0;
+        numFilesRenamed = 0;
+        numFilesListed = 0;
       }
+      metricsRecord.update();
+    }
       
-      synchronized void openFile() {
-        ++numFilesOpened;
-      }
+    synchronized void createFile() {
+      ++numFilesCreated;
+    }
       
-      synchronized void renameFile() {
-        ++numFilesRenamed;
-      }
+    synchronized void openFile() {
+      ++numFilesOpened;
+    }
       
-      synchronized void listFile(int nfiles) {
-        numFilesListed += nfiles;
-      }
+    synchronized void renameFile() {
+      ++numFilesRenamed;
     }
-    
-    private NameNodeMetrics myMetrics = new NameNodeMetrics();
-    
-    /**
-     * Initialize the server
-     * 
-     * @param hostname which hostname to bind to
-     * @param port the port number to bind to
-     * @param conf the configuration
-     */
-    private void init(String hostname, int port, 
-                      Configuration conf
-                      ) throws IOException {
-      this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
-      this.server = RPC.getServer(this, hostname, port, handlerCount, 
-                                  false, conf);
-
-      // The rpc-server port can be ephemeral... ensure we have the correct info
-      this.nameNodeAddress = this.server.getListenerAddress(); 
-      conf.set("fs.default.name", new String(nameNodeAddress.getHostName() + ":" + nameNodeAddress.getPort()));
-      LOG.info("Namenode up at: " + this.nameNodeAddress);
-
-      try {
-        this.namesystem = new FSNamesystem(this.nameNodeAddress.getHostName(), this.nameNodeAddress.getPort(), this, conf);
-        this.server.start();  //start RPC server   
-  
-        this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
-        this.emptier.setDaemon(true);
-        this.emptier.start();
-      } catch (IOException e) {
-        this.server.stop();
-        throw e;
-      }
       
+    synchronized void listFile(int nfiles) {
+      numFilesListed += nfiles;
     }
+  }
     
-    /**
-     * Start NameNode.
-     * <p>
-     * The name-node can be started with one of the following startup options:
-     * <ul> 
-     * <li>{@link FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
-     * <li>{@link FSConstants.StartupOption#FORMAT FORMAT} - format name node</li>
-     * <li>{@link FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster  
-     * upgrade and create a snapshot of the current file system state</li> 
-     * <li>{@link FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the  
-     *            cluster back to the previous state</li>
-     * </ul>
-     * The option is passed via configuration field: 
-     * <tt>dfs.namenode.startup</tt>
-     * 
-     * The conf will be modified to reflect the actual ports on which 
-     * the NameNode is up and running if the user passes the port as
-     * <code>zero</code> in the conf.
-     * 
-     * @param conf  confirguration
-     * @throws IOException
-     */
-    public NameNode(Configuration conf) throws IOException {
-      InetSocketAddress addr = 
-        DataNode.createSocketAddr(conf.get("fs.default.name"));
-      init( addr.getHostName(), addr.getPort(), conf );
-    }
-
-    /**
-     * Create a NameNode at the specified location and start it.
-     * 
-     * The conf will be modified to reflect the actual ports on which 
-     * the NameNode is up and running if the user passes the port as
-     * <code>zero</code>.  
-     */
-    public NameNode(String bindAddress, int port, 
+  private NameNodeMetrics myMetrics = new NameNodeMetrics();
+    
+  /**
+   * Initialize the server
+   * 
+   * @param hostname which hostname to bind to
+   * @param port the port number to bind to
+   * @param conf the configuration
+   */
+  private void init(String hostname, int port, 
                     Configuration conf
                     ) throws IOException {
-      init( bindAddress, port, conf );
-    }
-
-    /**
-     * Wait for service to finish.
-     * (Normally, it runs forever.)
-     */
-    public void join() {
-        try {
-            this.server.join();
-        } catch (InterruptedException ie) {
-        }
-    }
-
-    /**
-     * Stop all NameNode threads and wait for all to finish.
-    */
-    public void stop() {
-      if (! stopRequested) {
-        stopRequested = true;
-        namesystem.close();
-        emptier.interrupt();
-        server.stop();
-      }
+    this.handlerCount = conf.getInt("dfs.namenode.handler.count", 10);
+    this.server = RPC.getServer(this, hostname, port, handlerCount, 
+                                false, conf);
+
+    // The rpc-server port can be ephemeral... ensure we have the correct info
+    this.nameNodeAddress = this.server.getListenerAddress(); 
+    conf.set("fs.default.name", new String(nameNodeAddress.getHostName() + ":" + nameNodeAddress.getPort()));
+    LOG.info("Namenode up at: " + this.nameNodeAddress);
+
+    try {
+      this.namesystem = new FSNamesystem(this.nameNodeAddress.getHostName(), this.nameNodeAddress.getPort(), this, conf);
+      this.server.start();  //start RPC server   
+  
+      this.emptier = new Thread(new Trash(conf).getEmptier(), "Trash Emptier");
+      this.emptier.setDaemon(true);
+      this.emptier.start();
+    } catch (IOException e) {
+      this.server.stop();
+      throw e;
     }
-
-    /////////////////////////////////////////////////////
-    // ClientProtocol
-    /////////////////////////////////////////////////////
+      
+  }
     
-    /**
-     */
-    public LocatedBlock[] open(String src) throws IOException {
-        String clientMachine = Server.getRemoteAddress();
-        if ( clientMachine == null ) {
-            clientMachine = "";
-        }
-        Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
-        if (openResults == null) {
-            throw new IOException("Cannot open filename " + src);
-        } else {
-            myMetrics.openFile();
-            Block blocks[] = (Block[]) openResults[0];
-            DatanodeInfo sets[][] = (DatanodeInfo[][]) openResults[1];
-            LocatedBlock results[] = new LocatedBlock[blocks.length];
-            for (int i = 0; i < blocks.length; i++) {
-                results[i] = new LocatedBlock(blocks[i], sets[i]);
-            }
-            return results;
-        }
-    }
-
-    /**
-     */
-    public LocatedBlock create(String src, 
-                               String clientName, 
-                               boolean overwrite,
-                               short replication,
-                               long blockSize
-    ) throws IOException {
-       String clientMachine = Server.getRemoteAddress();
-       if ( clientMachine == null ) {
-           clientMachine = "";
-       }
-       stateChangeLog.debug("*DIR* NameNode.create: file "
-            +src+" for "+clientName+" at "+clientMachine);
-       if (!checkPathLength(src)) {
-           throw new IOException("create: Pathname too long.  Limit " 
-               + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
-       }
-       Object results[] = namesystem.startFile(new UTF8(src), 
-                                                new UTF8(clientName), 
-                                                new UTF8(clientMachine), 
-                                                overwrite,
-                                                replication,
-                                                blockSize);
-       myMetrics.createFile();
-        Block b = (Block) results[0];
-        DatanodeInfo targets[] = (DatanodeInfo[]) results[1];
-        return new LocatedBlock(b, targets);
-    }
-
-    public boolean setReplication( String src, 
-                                short replication
-                              ) throws IOException {
-      return namesystem.setReplication( src, replication );
-    }
+  /**
+   * Start NameNode.
+   * <p>
+   * The name-node can be started with one of the following startup options:
+   * <ul> 
+   * <li>{@link FSConstants.StartupOption#REGULAR REGULAR} - normal startup</li>
+   * <li>{@link FSConstants.StartupOption#FORMAT FORMAT} - format name node</li>
+   * <li>{@link FSConstants.StartupOption#UPGRADE UPGRADE} - start the cluster  
+   * upgrade and create a snapshot of the current file system state</li> 
+   * <li>{@link FSConstants.StartupOption#ROLLBACK ROLLBACK} - roll the  
+   *            cluster back to the previous state</li>
+   * </ul>
+   * The option is passed via configuration field: 
+   * <tt>dfs.namenode.startup</tt>
+   * 
+   * The conf will be modified to reflect the actual ports on which 
+   * the NameNode is up and running if the user passes the port as
+   * <code>zero</code> in the conf.
+   * 
+   * @param conf  confirguration
+   * @throws IOException
+   */
+  public NameNode(Configuration conf) throws IOException {
+    InetSocketAddress addr = 
+      DataNode.createSocketAddr(conf.get("fs.default.name"));
+    init( addr.getHostName(), addr.getPort(), conf );
+  }
+
+  /**
+   * Create a NameNode at the specified location and start it.
+   * 
+   * The conf will be modified to reflect the actual ports on which 
+   * the NameNode is up and running if the user passes the port as
+   * <code>zero</code>.  
+   */
+  public NameNode(String bindAddress, int port, 
+                  Configuration conf
+                  ) throws IOException {
+    init( bindAddress, port, conf );
+  }
+
+  /**
+   * Wait for service to finish.
+   * (Normally, it runs forever.)
+   */
+  public void join() {
+    try {
+      this.server.join();
+    } catch (InterruptedException ie) {
+    }
+  }
+
+  /**
+   * Stop all NameNode threads and wait for all to finish.
+   */
+  public void stop() {
+    if (! stopRequested) {
+      stopRequested = true;
+      namesystem.close();
+      emptier.interrupt();
+      server.stop();
+    }
+  }
+
+  /////////////////////////////////////////////////////
+  // ClientProtocol
+  /////////////////////////////////////////////////////
     
-    /**
-     */
-    public LocatedBlock addBlock(String src, 
-                                 String clientName) throws IOException {
-        stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
-            +src+" for "+clientName);
-        UTF8 src8 = new UTF8(src);
-        UTF8 client8 = new UTF8(clientName);
-        Object[] results = namesystem.getAdditionalBlock(src8, client8);
-        Block b = (Block) results[0];
-        DatanodeInfo targets[] = (DatanodeInfo[]) results[1];
-        return new LocatedBlock(b, targets);            
-    }
-
-    /**
-     * The client needs to give up on the block.
-     */
-    public void abandonBlock(Block b, String src) throws IOException {
-        stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
-                +b.getBlockName()+" of file "+src );
-        if (! namesystem.abandonBlock(b, new UTF8(src))) {
-            throw new IOException("Cannot abandon block during write to " + src);
-        }
-    }
-    /**
-     */
-    public void abandonFileInProgress(String src, 
-                                      String holder) throws IOException {
-        stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src );
-        namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
-    }
-    /**
-     */
-    public boolean complete(String src, String clientName) throws IOException {
-        stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName );
-        int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
-        if (returnCode == STILL_WAITING) {
-            return false;
-        } else if (returnCode == COMPLETE_SUCCESS) {
-            return true;
-        } else {
-            throw new IOException("Could not complete write to file " + src + " by " + clientName);
-        }
-    }
-
-    /**
-     * The client has detected an error on the specified located blocks 
-     * and is reporting them to the server.  For now, the namenode will 
-     * delete the blocks from the datanodes.  In the future we might 
-     * check the blocks are actually corrupt. 
-     */
-    public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
-      stateChangeLog.debug("*DIR* NameNode.reportBadBlocks");
+  /**
+   */
+  public LocatedBlock[] open(String src) throws IOException {
+    String clientMachine = Server.getRemoteAddress();
+    if ( clientMachine == null ) {
+      clientMachine = "";
+    }
+    Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
+    if (openResults == null) {
+      throw new IOException("Cannot open filename " + src);
+    } else {
+      myMetrics.openFile();
+      Block blocks[] = (Block[]) openResults[0];
+      DatanodeInfo sets[][] = (DatanodeInfo[][]) openResults[1];
+      LocatedBlock results[] = new LocatedBlock[blocks.length];
       for (int i = 0; i < blocks.length; i++) {
-        Block blk = blocks[i].getBlock();
-        DatanodeInfo[] nodes = blocks[i].getLocations();
-        for (int j = 0; j < nodes.length; j++) {
-          DatanodeInfo dn = nodes[j];
-          namesystem.invalidateBlock(blk, dn);
-        }
+        results[i] = new LocatedBlock(blocks[i], sets[i]);
+      }
+      return results;
+    }
+  }
+
+  /**
+   */
+  public LocatedBlock create(String src, 
+                             String clientName, 
+                             boolean overwrite,
+                             short replication,
+                             long blockSize
+                             ) throws IOException {
+    String clientMachine = Server.getRemoteAddress();
+    if ( clientMachine == null ) {
+      clientMachine = "";
+    }
+    stateChangeLog.debug("*DIR* NameNode.create: file "
+                         +src+" for "+clientName+" at "+clientMachine);
+    if (!checkPathLength(src)) {
+      throw new IOException("create: Pathname too long.  Limit " 
+                            + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+    }
+    Object results[] = namesystem.startFile(new UTF8(src), 
+                                            new UTF8(clientName), 
+                                            new UTF8(clientMachine), 
+                                            overwrite,
+                                            replication,
+                                            blockSize);
+    myMetrics.createFile();
+    Block b = (Block) results[0];
+    DatanodeInfo targets[] = (DatanodeInfo[]) results[1];
+    return new LocatedBlock(b, targets);
+  }
+
+  public boolean setReplication( String src, 
+                                 short replication
+                                 ) throws IOException {
+    return namesystem.setReplication( src, replication );
+  }
+    
+  /**
+   */
+  public LocatedBlock addBlock(String src, 
+                               String clientName) throws IOException {
+    stateChangeLog.debug("*BLOCK* NameNode.addBlock: file "
+                         +src+" for "+clientName);
+    UTF8 src8 = new UTF8(src);
+    UTF8 client8 = new UTF8(clientName);
+    Object[] results = namesystem.getAdditionalBlock(src8, client8);
+    Block b = (Block) results[0];
+    DatanodeInfo targets[] = (DatanodeInfo[]) results[1];
+    return new LocatedBlock(b, targets);            
+  }
+
+  /**
+   * The client needs to give up on the block.
+   */
+  public void abandonBlock(Block b, String src) throws IOException {
+    stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
+                         +b.getBlockName()+" of file "+src );
+    if (! namesystem.abandonBlock(b, new UTF8(src))) {
+      throw new IOException("Cannot abandon block during write to " + src);
+    }
+  }
+  /**
+   */
+  public void abandonFileInProgress(String src, 
+                                    String holder) throws IOException {
+    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src );
+    namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
+  }
+  /**
+   */
+  public boolean complete(String src, String clientName) throws IOException {
+    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName );
+    int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
+    if (returnCode == STILL_WAITING) {
+      return false;
+    } else if (returnCode == COMPLETE_SUCCESS) {
+      return true;
+    } else {
+      throw new IOException("Could not complete write to file " + src + " by " + clientName);
+    }
+  }
+
+  /**
+   * The client has detected an error on the specified located blocks 
+   * and is reporting them to the server.  For now, the namenode will 
+   * delete the blocks from the datanodes.  In the future we might 
+   * check the blocks are actually corrupt. 
+   */
+  public void reportBadBlocks(LocatedBlock[] blocks) throws IOException {
+    stateChangeLog.debug("*DIR* NameNode.reportBadBlocks");
+    for (int i = 0; i < blocks.length; i++) {
+      Block blk = blocks[i].getBlock();
+      DatanodeInfo[] nodes = blocks[i].getLocations();
+      for (int j = 0; j < nodes.length; j++) {
+        DatanodeInfo dn = nodes[j];
+        namesystem.invalidateBlock(blk, dn);
       }
     }
+  }
 
-    /**
-     */
-    public String[][] getHints(String src, long start, long len) throws IOException {
-      return namesystem.getDatanodeHints( src, start, len );
-    }
+  /**
+   */
+  public String[][] getHints(String src, long start, long len) throws IOException {
+    return namesystem.getDatanodeHints( src, start, len );
+  }
     
-    public long getBlockSize(String filename) throws IOException {
-      return namesystem.getBlockSize(filename);
-    }
+  public long getBlockSize(String filename) throws IOException {
+    return namesystem.getBlockSize(filename);
+  }
     
-    /**
-     */
-    public boolean rename(String src, String dst) throws IOException {
-        stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst );
-        if (!checkPathLength(dst)) {
-            throw new IOException("rename: Pathname too long.  Limit " 
-                + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
-        }
-        boolean ret = namesystem.renameTo(new UTF8(src), new UTF8(dst));
-        if (ret) {
-            myMetrics.renameFile();
-        }
-        return ret;
-    }
-
-    /**
-     */
-    public boolean delete(String src) throws IOException {
-        stateChangeLog.debug("*DIR* NameNode.delete: " + src );
-        return namesystem.delete(new UTF8(src));
-    }
-
-    /**
-     */
-    public boolean exists(String src) throws IOException {
-        return namesystem.exists(new UTF8(src));
-    }
-
-    /**
-     */
-    public boolean isDir(String src) throws IOException {
-        return namesystem.isDir(new UTF8(src));
-    }
-
-    /**
-     * Check path length does not exceed maximum.  Returns true if
-     * length and depth are okay.  Returns false if length is too long 
-     * or depth is too great.
-     * 
-     */
-    private boolean checkPathLength(String src) {
-        Path srcPath = new Path(src);
-        return (src.length() <= MAX_PATH_LENGTH &&
-                srcPath.depth() <= MAX_PATH_DEPTH);
-    }
+  /**
+   */
+  public boolean rename(String src, String dst) throws IOException {
+    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst );
+    if (!checkPathLength(dst)) {
+      throw new IOException("rename: Pathname too long.  Limit " 
+                            + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+    }
+    boolean ret = namesystem.renameTo(new UTF8(src), new UTF8(dst));
+    if (ret) {
+      myMetrics.renameFile();
+    }
+    return ret;
+  }
+
+  /**
+   */
+  public boolean delete(String src) throws IOException {
+    stateChangeLog.debug("*DIR* NameNode.delete: " + src );
+    return namesystem.delete(new UTF8(src));
+  }
+
+  /**
+   */
+  public boolean exists(String src) throws IOException {
+    return namesystem.exists(new UTF8(src));
+  }
+
+  /**
+   */
+  public boolean isDir(String src) throws IOException {
+    return namesystem.isDir(new UTF8(src));
+  }
+
+  /**
+   * Check path length does not exceed maximum.  Returns true if
+   * length and depth are okay.  Returns false if length is too long 
+   * or depth is too great.
+   * 
+   */
+  private boolean checkPathLength(String src) {
+    Path srcPath = new Path(src);
+    return (src.length() <= MAX_PATH_LENGTH &&
+            srcPath.depth() <= MAX_PATH_DEPTH);
+  }
     
-    /**
-     */
-    public boolean mkdirs(String src) throws IOException {
-        stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src );
-        if (!checkPathLength(src)) {
-            throw new IOException("mkdirs: Pathname too long.  Limit " 
-                + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
-        }
-        return namesystem.mkdirs( src );
-    }
-
-    /** @deprecated */ @Deprecated
+  /**
+   */
+  public boolean mkdirs(String src) throws IOException {
+    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src );
+    if (!checkPathLength(src)) {
+      throw new IOException("mkdirs: Pathname too long.  Limit " 
+                            + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
+    }
+    return namesystem.mkdirs( src );
+  }
+
+  /** @deprecated */ @Deprecated
     public boolean obtainLock(String src, String clientName, boolean exclusive) throws IOException {
-        int returnCode = namesystem.obtainLock(new UTF8(src), new UTF8(clientName), exclusive);
-        if (returnCode == COMPLETE_SUCCESS) {
-            return true;
-        } else if (returnCode == STILL_WAITING) {
-            return false;
-        } else {
-            throw new IOException("Failure when trying to obtain lock on " + src);
-        }
+    int returnCode = namesystem.obtainLock(new UTF8(src), new UTF8(clientName), exclusive);
+    if (returnCode == COMPLETE_SUCCESS) {
+      return true;
+    } else if (returnCode == STILL_WAITING) {
+      return false;
+    } else {
+      throw new IOException("Failure when trying to obtain lock on " + src);
     }
+  }
 
-    /** @deprecated */ @Deprecated
+  /** @deprecated */ @Deprecated
     public boolean releaseLock(String src, String clientName) throws IOException {
-        int returnCode = namesystem.releaseLock(new UTF8(src), new UTF8(clientName));
-        if (returnCode == COMPLETE_SUCCESS) {
-            return true;
-        } else if (returnCode == STILL_WAITING) {
-            return false;
-        } else {
-            throw new IOException("Failure when trying to release lock on " + src);
-        }
-    }
-
-    /**
-     */
-    public void renewLease(String clientName) throws IOException {
-        namesystem.renewLease(new UTF8(clientName));        
-    }
-
-    /**
-     */
-    public DFSFileInfo[] getListing(String src) throws IOException {
-        DFSFileInfo[] files = namesystem.getListing(new UTF8(src));
-        if (files != null) {
-            myMetrics.listFile(files.length);
-        }
-        return files;
-    }
-
-    /**
-     */
-    public long[] getStats() throws IOException {
-        long results[] = new long[2];
-        long totalCapacity = namesystem.totalCapacity();
-        results[0] = totalCapacity;
-        results[1] = totalCapacity - namesystem.totalRemaining();
-        return results;
-    }
-
-    /**
-     */
-    public DatanodeInfo[] getDatanodeReport() throws IOException {
-        DatanodeInfo results[] = namesystem.datanodeReport();
-        if (results == null || results.length == 0) {
-            throw new IOException("Cannot find datanode report");
-        }
-        return results;
-    }
+    int returnCode = namesystem.releaseLock(new UTF8(src), new UTF8(clientName));
+    if (returnCode == COMPLETE_SUCCESS) {
+      return true;
+    } else if (returnCode == STILL_WAITING) {
+      return false;
+    } else {
+      throw new IOException("Failure when trying to release lock on " + src);
+    }
+  }
+
+  /**
+   */
+  public void renewLease(String clientName) throws IOException {
+    namesystem.renewLease(new UTF8(clientName));        
+  }
+
+  /**
+   */
+  public DFSFileInfo[] getListing(String src) throws IOException {
+    DFSFileInfo[] files = namesystem.getListing(new UTF8(src));
+    if (files != null) {
+      myMetrics.listFile(files.length);
+    }
+    return files;
+  }
+
+  /**
+   */
+  public long[] getStats() throws IOException {
+    long results[] = new long[2];
+    long totalCapacity = namesystem.totalCapacity();
+    results[0] = totalCapacity;
+    results[1] = totalCapacity - namesystem.totalRemaining();
+    return results;
+  }
+
+  /**
+   */
+  public DatanodeInfo[] getDatanodeReport() throws IOException {
+    DatanodeInfo results[] = namesystem.datanodeReport();
+    if (results == null || results.length == 0) {
+      throw new IOException("Cannot find datanode report");
+    }
+    return results;
+  }
     
-    /**
-     * @inheritDoc
-     */
-    public boolean setSafeMode( SafeModeAction action ) throws IOException {
-      switch( action ) {
-      case SAFEMODE_LEAVE: // leave safe mode
-        namesystem.leaveSafeMode();
-        break;
-      case SAFEMODE_ENTER: // enter safe mode
-        namesystem.enterSafeMode();
-        break;
-      case SAFEMODE_GET: // get safe mode
-      }
-      return namesystem.isInSafeMode();
-    }
-
-    /**
-     * Is the cluster currently in safe mode?
-     */
-    boolean isInSafeMode() {
-      return namesystem.isInSafeMode();
-    }
-
-    /*
-     * Refresh the list of datanodes that the namenode should allow to  
-     * connect.  Uses the files list in the configuration to update the list. 
-     */
-    public void refreshNodes() throws IOException {
-      namesystem.refreshNodes();
-    }
-
-    /**
-     * Returns the size of the current edit log.
-     */
-    public long getEditLogSize() throws IOException {
-      return namesystem.getEditLogSize();
-    }
-
-    /**
-     * Roll the edit log.
-     */
-    public void rollEditLog() throws IOException {
-      namesystem.rollEditLog();
-    }
-
-    /**
-     * Roll the image 
-     */
-    public void rollFsImage() throws IOException {
-      namesystem.rollFSImage();
-    }
+  /**
+   * @inheritDoc
+   */
+  public boolean setSafeMode( SafeModeAction action ) throws IOException {
+    switch( action ) {
+    case SAFEMODE_LEAVE: // leave safe mode
+      namesystem.leaveSafeMode();
+      break;
+    case SAFEMODE_ENTER: // enter safe mode
+      namesystem.enterSafeMode();
+      break;
+    case SAFEMODE_GET: // get safe mode
+    }
+    return namesystem.isInSafeMode();
+  }
+
+  /**
+   * Is the cluster currently in safe mode?
+   */
+  boolean isInSafeMode() {
+    return namesystem.isInSafeMode();
+  }
+
+  /*
+   * Refresh the list of datanodes that the namenode should allow to  
+   * connect.  Uses the files list in the configuration to update the list. 
+   */
+  public void refreshNodes() throws IOException {
+    namesystem.refreshNodes();
+  }
+
+  /**
+   * Returns the size of the current edit log.
+   */
+  public long getEditLogSize() throws IOException {
+    return namesystem.getEditLogSize();
+  }
+
+  /**
+   * Roll the edit log.
+   */
+  public void rollEditLog() throws IOException {
+    namesystem.rollEditLog();
+  }
+
+  /**
+   * Roll the image 
+   */
+  public void rollFsImage() throws IOException {
+    namesystem.rollFSImage();
+  }
     
-    public void finalizeUpgrade() throws IOException {
-      getFSImage().finalizeUpgrade();
-    }
-
-    /**
-     * Dumps namenode state into specified file
-     */
-    public void metaSave(String filename) throws IOException {
-      namesystem.metaSave(filename);
-    }
-
-    ////////////////////////////////////////////////////////////////
-    // DatanodeProtocol
-    ////////////////////////////////////////////////////////////////
-    /** 
-     */
-    public DatanodeRegistration register( DatanodeRegistration nodeReg,
-                                          String networkLocation
+  public void finalizeUpgrade() throws IOException {
+    getFSImage().finalizeUpgrade();
+  }
+
+  /**
+   * Dumps namenode state into specified file
+   */
+  public void metaSave(String filename) throws IOException {
+    namesystem.metaSave(filename);
+  }
+
+  ////////////////////////////////////////////////////////////////
+  // DatanodeProtocol
+  ////////////////////////////////////////////////////////////////
+  /** 
+   */
+  public DatanodeRegistration register( DatanodeRegistration nodeReg,
+                                        String networkLocation
                                         ) throws IOException {
-      verifyVersion( nodeReg.getVersion() );
-      namesystem.registerDatanode( nodeReg, networkLocation );
+    verifyVersion( nodeReg.getVersion() );
+    namesystem.registerDatanode( nodeReg, networkLocation );
       
-      return nodeReg;
-    }
+    return nodeReg;
+  }
     
-    /**
-     * Data node notify the name node that it is alive 
-     * Return a block-oriented command for the datanode to execute.
-     * This will be either a transfer or a delete operation.
-     */
-    public DatanodeCommand sendHeartbeat( DatanodeRegistration nodeReg,
-                                          long capacity, 
-                                          long remaining,
-                                          int xmitsInProgress,
-                                          int xceiverCount) throws IOException {
-        Object xferResults[] = new Object[2];
-        xferResults[0] = xferResults[1] = null;
-        Object deleteList[] = new Object[1];
-        deleteList[0] = null; 
-
-        verifyRequest( nodeReg );
-        if( namesystem.gotHeartbeat( nodeReg, capacity, remaining, 
-                                     xceiverCount, 
-                                     xmitsInProgress,
-                                     xferResults,
-                                     deleteList)) {
-          // request block report from the datanode
-          assert(xferResults[0] == null && deleteList[0] == null);
-          return new DatanodeCommand( DataNodeAction.DNA_REGISTER );
-        }
-        
-        //
-        // Ask to perform pending transfers, if any
-        //
-        if (xferResults[0] != null) {
-            assert(deleteList[0] == null);
-            return new BlockCommand((Block[]) xferResults[0], (DatanodeInfo[][]) xferResults[1]);
-        }
-
-        //
-        // If there are no transfers, check for recently-deleted blocks that
-        // should be removed.  This is not a full-datanode sweep, as is done during
-        // a block report.  This is just a small fast removal of blocks that have
-        // just been removed.
-        //
-        if (deleteList[0] != null) {
-            return new BlockCommand((Block[]) deleteList[0]);
-        }
-        return null;
-    }
-
-    public DatanodeCommand blockReport( DatanodeRegistration nodeReg,
-                                        Block blocks[]) throws IOException {
-        verifyRequest( nodeReg );
-        stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-                +"from "+nodeReg.getName()+" "+blocks.length+" blocks" );
-
-        Block blocksToDelete[] = namesystem.processReport( nodeReg, blocks );
-        if( blocksToDelete != null && blocksToDelete.length > 0 )
-            return new BlockCommand( blocksToDelete );
-        if( getFSImage().isUpgradeFinalized() )
-          return new DatanodeCommand( DataNodeAction.DNA_FINALIZE );
-        return null;
-    }
-
-    public void blockReceived(DatanodeRegistration nodeReg, 
-                              Block blocks[]) throws IOException {
-        verifyRequest( nodeReg );
-        stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
-                +"from "+nodeReg.getName()+" "+blocks.length+" blocks." );
-        for (int i = 0; i < blocks.length; i++) {
-            namesystem.blockReceived( nodeReg, blocks[i] );
-        }
-    }
-
-    /**
-     */
-    public void errorReport(DatanodeRegistration nodeReg,
-                            int errorCode, 
-                            String msg) throws IOException {
-      // Log error message from datanode
-      LOG.info("Report from " + nodeReg.getName() + ": " + msg);
-      if( errorCode == DatanodeProtocol.NOTIFY ) {
-        return;
-      }
-      verifyRequest( nodeReg );
-      if( errorCode == DatanodeProtocol.DISK_ERROR ) {
-          namesystem.removeDatanode( nodeReg );            
-      }
+  /**
+   * Data node notify the name node that it is alive 
+   * Return a block-oriented command for the datanode to execute.
+   * This will be either a transfer or a delete operation.
+   */
+  public DatanodeCommand sendHeartbeat( DatanodeRegistration nodeReg,
+                                        long capacity, 
+                                        long remaining,
+                                        int xmitsInProgress,
+                                        int xceiverCount) throws IOException {
+    Object xferResults[] = new Object[2];
+    xferResults[0] = xferResults[1] = null;
+    Object deleteList[] = new Object[1];
+    deleteList[0] = null; 
+
+    verifyRequest( nodeReg );
+    if( namesystem.gotHeartbeat( nodeReg, capacity, remaining, 
+                                 xceiverCount, 
+                                 xmitsInProgress,
+                                 xferResults,
+                                 deleteList)) {
+      // request block report from the datanode
+      assert(xferResults[0] == null && deleteList[0] == null);
+      return new DatanodeCommand( DataNodeAction.DNA_REGISTER );
     }
+        
+    //
+    // Ask to perform pending transfers, if any
+    //
+    if (xferResults[0] != null) {
+      assert(deleteList[0] == null);
+      return new BlockCommand((Block[]) xferResults[0], (DatanodeInfo[][]) xferResults[1]);
+    }
+
+    //
+    // If there are no transfers, check for recently-deleted blocks that
+    // should be removed.  This is not a full-datanode sweep, as is done during
+    // a block report.  This is just a small fast removal of blocks that have
+    // just been removed.
+    //
+    if (deleteList[0] != null) {
+      return new BlockCommand((Block[]) deleteList[0]);
+    }
+    return null;
+  }
+
+  public DatanodeCommand blockReport( DatanodeRegistration nodeReg,
+                                      Block blocks[]) throws IOException {
+    verifyRequest( nodeReg );
+    stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks" );
+
+    Block blocksToDelete[] = namesystem.processReport( nodeReg, blocks );
+    if( blocksToDelete != null && blocksToDelete.length > 0 )
+      return new BlockCommand( blocksToDelete );
+    if( getFSImage().isUpgradeFinalized() )
+      return new DatanodeCommand( DataNodeAction.DNA_FINALIZE );
+    return null;
+  }
+
+  public void blockReceived(DatanodeRegistration nodeReg, 
+                            Block blocks[]) throws IOException {
+    verifyRequest( nodeReg );
+    stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks." );
+    for (int i = 0; i < blocks.length; i++) {
+      namesystem.blockReceived( nodeReg, blocks[i] );
+    }
+  }
+
+  /**
+   */
+  public void errorReport(DatanodeRegistration nodeReg,
+                          int errorCode, 
+                          String msg) throws IOException {
+    // Log error message from datanode
+    LOG.info("Report from " + nodeReg.getName() + ": " + msg);
+    if( errorCode == DatanodeProtocol.NOTIFY ) {
+      return;
+    }
+    verifyRequest( nodeReg );
+    if( errorCode == DatanodeProtocol.DISK_ERROR ) {
+      namesystem.removeDatanode( nodeReg );            
+    }
+  }
     
-    public NamespaceInfo versionRequest() throws IOException {
-      return namesystem.getNamespaceInfo();
-    }
-
-    /** 
-     * Verify request.
-     * 
-     * Verifies correctness of the datanode version, registration ID, and 
-     * that the datanode does not need to be shut down.
-     * 
-     * @param nodeReg data node registration
-     * @throws IOException
-     */
-    public void verifyRequest( DatanodeRegistration nodeReg ) throws IOException {
-      verifyVersion( nodeReg.getVersion() );
-      if( ! namesystem.getRegistrationID().equals( nodeReg.getRegistrationID() ))
-          throw new UnregisteredDatanodeException( nodeReg );
-    }
+  public NamespaceInfo versionRequest() throws IOException {
+    return namesystem.getNamespaceInfo();
+  }
+
+  /** 
+   * Verify request.
+   * 
+   * Verifies correctness of the datanode version, registration ID, and 
+   * that the datanode does not need to be shut down.
+   * 
+   * @param nodeReg data node registration
+   * @throws IOException
+   */
+  public void verifyRequest( DatanodeRegistration nodeReg ) throws IOException {
+    verifyVersion( nodeReg.getVersion() );
+    if( ! namesystem.getRegistrationID().equals( nodeReg.getRegistrationID() ))
+      throw new UnregisteredDatanodeException( nodeReg );
+  }
     
-    /**
-     * Verify version.
-     * 
-     * @param version
-     * @throws IOException
-     */
-    public void verifyVersion( int version ) throws IOException {
-      if( version != LAYOUT_VERSION )
-        throw new IncorrectVersionException( version, "data node" );
-    }
-
-    /**
-     * Returns the name of the fsImage file
-     */
-    public File getFsImageName() throws IOException {
-      return getFSImage().getFsImageName();
-    }
+  /**
+   * Verify version.
+   * 
+   * @param version
+   * @throws IOException
+   */
+  public void verifyVersion( int version ) throws IOException {
+    if( version != LAYOUT_VERSION )
+      throw new IncorrectVersionException( version, "data node" );
+  }
+
+  /**
+   * Returns the name of the fsImage file
+   */
+  public File getFsImageName() throws IOException {
+    return getFSImage().getFsImageName();
+  }
     
-    FSImage getFSImage() {
-      return namesystem.dir.fsImage;
-    }
-
-    /**
-     * Returns the name of the fsImage file uploaded by periodic
-     * checkpointing
-     */
-    public File[] getFsImageNameCheckpoint() throws IOException {
-      return getFSImage().getFsImageNameCheckpoint();
-    }
-
-    /**
-     * Returns the name of the edits file
-     */
-    public File getFsEditName() throws IOException {
-      return namesystem.getFsEditName();
-    }
-
-    /**
-     * Returns the address on which the NameNode is listening.
-     * @return the address on which the NameNode is listening.
-     */
-    public InetSocketAddress getNameNodeAddress() {
-      return nameNodeAddress;
-    }
-
-    /**
-     * Verify that configured directories exist, then
-     * interactively confirm that formatting is desired
-     * for each existing directory and format them.
-     * 
-     * @param conf
-     * @param isConfirmationNeeded
-     * @return true if formatting was aborted, false otherwise
-     * @throws IOException
-     */
-    private static boolean format(Configuration conf,
-                                  boolean isConfirmationNeeded
+  FSImage getFSImage() {
+    return namesystem.dir.fsImage;
+  }
+
+  /**
+   * Returns the name of the fsImage file uploaded by periodic
+   * checkpointing
+   */
+  public File[] getFsImageNameCheckpoint() throws IOException {
+    return getFSImage().getFsImageNameCheckpoint();
+  }
+
+  /**
+   * Returns the name of the edits file
+   */
+  public File getFsEditName() throws IOException {
+    return namesystem.getFsEditName();
+  }
+
+  /**
+   * Returns the address on which the NameNode is listening.
+   * @return the address on which the NameNode is listening.
+   */
+  public InetSocketAddress getNameNodeAddress() {
+    return nameNodeAddress;
+  }
+
+  /**
+   * Verify that configured directories exist, then
+   * interactively confirm that formatting is desired
+   * for each existing directory and format them.
+   * 
+   * @param conf
+   * @param isConfirmationNeeded
+   * @return true if formatting was aborted, false otherwise
+   * @throws IOException
+   */
+  private static boolean format(Configuration conf,
+                                boolean isConfirmationNeeded
                                 ) throws IOException {
-      Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs( conf );
-      for( Iterator<File> it = dirsToFormat.iterator(); it.hasNext(); ) {
-        File curDir = it.next();
-        if( ! curDir.exists() )
-          continue;
-        if( isConfirmationNeeded ) {
-          System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
-          if (!(System.in.read() == 'Y')) {
-            System.err.println("Format aborted in "+ curDir);
-            return true;
-          }
-          while( System.in.read() != '\n' ); // discard the enter-key
+    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs( conf );
+    for( Iterator<File> it = dirsToFormat.iterator(); it.hasNext(); ) {
+      File curDir = it.next();
+      if( ! curDir.exists() )
+        continue;
+      if( isConfirmationNeeded ) {
+        System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
+        if (!(System.in.read() == 'Y')) {
+          System.err.println("Format aborted in "+ curDir);
+          return true;
         }
+        while( System.in.read() != '\n' ); // discard the enter-key
       }
-
-      FSNamesystem nsys = new FSNamesystem(new FSImage( dirsToFormat ));
-      nsys.dir.fsImage.format();
-      return false;
     }
 
-    private static void printUsage() {
-      System.err.println(
-      "Usage: java NameNode [-format] | [-upgrade] | [-rollback]");
-    }
-
-    private static StartupOption parseArguments(String args[], 
-                                                Configuration conf ) {
-      int argsLen = (args == null) ? 0 : args.length;
-      StartupOption startOpt = StartupOption.REGULAR;
-      for( int i=0; i < argsLen; i++ ) {
-        String cmd = args[i];
-        if( "-format".equalsIgnoreCase(cmd) ) {
-          startOpt = StartupOption.FORMAT;
-        } else if( "-regular".equalsIgnoreCase(cmd) ) {
-          startOpt = StartupOption.REGULAR;
-        } else if( "-upgrade".equalsIgnoreCase(cmd) ) {
-          startOpt = StartupOption.UPGRADE;
-        } else if( "-rollback".equalsIgnoreCase(cmd) ) {
-          startOpt = StartupOption.ROLLBACK;
-        } else
-          return null;
-      }
-      conf.setObject( "dfs.namenode.startup", startOpt );
-      return startOpt;
+    FSNamesystem nsys = new FSNamesystem(new FSImage( dirsToFormat ));
+    nsys.dir.fsImage.format();
+    return false;
+  }
+
+  private static void printUsage() {
+    System.err.println(
+                       "Usage: java NameNode [-format] | [-upgrade] | [-rollback]");
+  }
+
+  private static StartupOption parseArguments(String args[], 
+                                              Configuration conf ) {
+    int argsLen = (args == null) ? 0 : args.length;
+    StartupOption startOpt = StartupOption.REGULAR;
+    for( int i=0; i < argsLen; i++ ) {
+      String cmd = args[i];
+      if( "-format".equalsIgnoreCase(cmd) ) {
+        startOpt = StartupOption.FORMAT;
+      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+        startOpt = StartupOption.REGULAR;
+      } else if( "-upgrade".equalsIgnoreCase(cmd) ) {
+        startOpt = StartupOption.UPGRADE;
+      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+        startOpt = StartupOption.ROLLBACK;
+      } else
+        return null;
     }
+    conf.setObject( "dfs.namenode.startup", startOpt );
+    return startOpt;
+  }
 
-    static NameNode createNameNode( String argv[], 
-                                    Configuration conf ) throws IOException {
-      if( conf == null )
-        conf = new Configuration();
-      StartupOption startOpt = parseArguments( argv, conf );
-      if( startOpt == null ) {
-        printUsage();
-        return null;
-      }
-      
-      if( startOpt == StartupOption.FORMAT ) {
-        boolean aborted = format( conf, true );
-        System.exit(aborted ? 1 : 0);
-      }
+  static NameNode createNameNode( String argv[], 
+                                  Configuration conf ) throws IOException {
+    if( conf == null )
+      conf = new Configuration();
+    StartupOption startOpt = parseArguments( argv, conf );
+    if( startOpt == null ) {
+      printUsage();
+      return null;
+    }
       
-      NameNode namenode = new NameNode(conf);
-      return namenode;
+    if( startOpt == StartupOption.FORMAT ) {
+      boolean aborted = format( conf, true );
+      System.exit(aborted ? 1 : 0);
     }
+      
+    NameNode namenode = new NameNode(conf);
+    return namenode;
+  }
     
-    /**
-     */
-    public static void main(String argv[]) throws Exception {
-      try {
-        NameNode namenode = createNameNode( argv, null );
-        if( namenode != null )
-          namenode.join();
-      } catch ( Throwable e ) {
-        LOG.error( StringUtils.stringifyException( e ) );
-        System.exit(-1);
-      }
-    }
+  /**
+   */
+  public static void main(String argv[]) throws Exception {
+    try {
+      NameNode namenode = createNameNode( argv, null );
+      if( namenode != null )
+        namenode.join();
+    } catch ( Throwable e ) {
+      LOG.error( StringUtils.stringifyException( e ) );
+      System.exit(-1);
+    }
+  }
 }
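
The block above shows how command-line flags reach the namenode: parseArguments() maps -format/-regular/-upgrade/-rollback onto a StartupOption, records it in the configuration under "dfs.namenode.startup", and createNameNode() then either formats the filesystem or constructs the NameNode. A minimal sketch of driving that path programmatically follows; the package declaration (needed because createNameNode() is package-private), the -upgrade flag, and the sketch class name are illustrative assumptions, not part of this change.

package org.apache.hadoop.dfs;   // assumed, so the package-private createNameNode() is visible

import org.apache.hadoop.conf.Configuration;

public class NameNodeStartupSketch {
  public static void startForUpgrade() throws Exception {
    Configuration conf = new Configuration();
    // parseArguments() will record StartupOption.UPGRADE under "dfs.namenode.startup"
    // before the NameNode is constructed from this configuration.
    NameNode namenode = NameNode.createNameNode(new String[] { "-upgrade" }, conf);
    if (namenode != null) {
      namenode.join();   // block until the namenode shuts down, as main() does
    }
  }
}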

+ 1 - 1
src/java/org/apache/hadoop/dfs/PendingReplicationBlocks.java

@@ -129,7 +129,7 @@ class PendingReplicationBlocks {
         return null;
       }
       Block[] blockList = timedOutItems.toArray(
-                            new Block[timedOutItems.size()]);
+                                                new Block[timedOutItems.size()]);
       timedOutItems.clear();
       return blockList;
     }

+ 374 - 374
src/java/org/apache/hadoop/dfs/SecondaryNameNode.java

@@ -50,421 +50,421 @@ import javax.servlet.http.HttpServletResponse;
  **********************************************************/
 public class SecondaryNameNode implements FSConstants, Runnable {
     
-    public static final Log LOG = LogFactory.getLog(
-                                  "org.apache.hadoop.dfs.NameNode.Secondary");
-    private static final String SRC_FS_IMAGE = "srcimage.tmp";
-    private static final String FS_EDITS = "edits.tmp";
-    private static final String DEST_FS_IMAGE = "destimage.tmp";
-
-    private ClientProtocol namenode;
-    private Configuration conf;
-    private InetSocketAddress nameNodeAddr;
-    private boolean shouldRun;
-    private StatusHttpServer infoServer;
-    private int infoPort;
-    private String infoBindAddress;
-
-    private File checkpointDir;
-    private long checkpointPeriod;	// in seconds
-    private long checkpointSize;    // size (in bytes) of current Edit Log
-    private File srcImage;
-    private File destImage;
-    private File editFile;
-
-    private boolean[] simulation = null; // error simulation events
-
-    /**
-     * Create a connection to the primary namenode.
-     */
-    public SecondaryNameNode(Configuration conf)  throws IOException {
+  public static final Log LOG = LogFactory.getLog(
+                                                  "org.apache.hadoop.dfs.NameNode.Secondary");
+  private static final String SRC_FS_IMAGE = "srcimage.tmp";
+  private static final String FS_EDITS = "edits.tmp";
+  private static final String DEST_FS_IMAGE = "destimage.tmp";
+
+  private ClientProtocol namenode;
+  private Configuration conf;
+  private InetSocketAddress nameNodeAddr;
+  private boolean shouldRun;
+  private StatusHttpServer infoServer;
+  private int infoPort;
+  private String infoBindAddress;
+
+  private File checkpointDir;
+  private long checkpointPeriod;	// in seconds
+  private long checkpointSize;    // size (in bytes) of current Edit Log
+  private File srcImage;
+  private File destImage;
+  private File editFile;
+
+  private boolean[] simulation = null; // error simulation events
+
+  /**
+   * Create a connection to the primary namenode.
+   */
+  public SecondaryNameNode(Configuration conf)  throws IOException {
 
-      //
-      // initialize error simulation code for junit test
-      //
-      initializeErrorSimulationEvent(2);
-
-      //
-      // Create connection to the namenode.
-      //
-      shouldRun = true;
-      nameNodeAddr = DataNode.createSocketAddr(
-                                 conf.get("fs.default.name", "local"));
-      this.conf = conf;
-      this.namenode = (ClientProtocol) RPC.getProxy(ClientProtocol.class,
-                       ClientProtocol.versionID, nameNodeAddr, conf);
-
-      //
-      // initialize the webserver for uploading files.
-      //
-      infoPort = conf.getInt("dfs.secondary.info.port", 50090);
-      infoBindAddress = conf.get("dfs.secondary.info.bindAddress", "0.0.0.0");
-      infoServer = new StatusHttpServer("dfs", infoBindAddress, infoPort, false);
-      infoServer.setAttribute("name.secondary", this);
-      infoServer.addServlet("getimage", "/getimage", GetImageServlet.class);
-      infoServer.start();
+    //
+    // initialize error simulation code for junit test
+    //
+    initializeErrorSimulationEvent(2);
 
-      //
-      // Initialize other scheduling parameters from the configuration
-      //
-      String[] dirName = conf.getStrings("fs.checkpoint.dir");
-      checkpointDir = new File(dirName[0]);
-      checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
-      checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
-      doSetup();
-
-      LOG.warn("Checkpoint Directory:" + checkpointDir);
-      LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
-               "(" + checkpointPeriod/60 + " min)");
-      LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
-               "(" + checkpointSize/1024 + " KB)");
-    }
+    //
+    // Create connection to the namenode.
+    //
+    shouldRun = true;
+    nameNodeAddr = DataNode.createSocketAddr(
+                                             conf.get("fs.default.name", "local"));
+    this.conf = conf;
+    this.namenode = (ClientProtocol) RPC.getProxy(ClientProtocol.class,
+                                                  ClientProtocol.versionID, nameNodeAddr, conf);
 
-   /**
-     * Shut down this instance of the SecondaryNameNode.
-     * Returns only after shutdown is complete.
-     */
-    public void shutdown() {
-      shouldRun = false;
-      try {
-          infoServer.stop();
-      } catch (Exception e) {
-      }
-    }
+    //
+    // initialize the webserver for uploading files.
+    //
+    infoPort = conf.getInt("dfs.secondary.info.port", 50090);
+    infoBindAddress = conf.get("dfs.secondary.info.bindAddress", "0.0.0.0");
+    infoServer = new StatusHttpServer("dfs", infoBindAddress, infoPort, false);
+    infoServer.setAttribute("name.secondary", this);
+    infoServer.addServlet("getimage", "/getimage", GetImageServlet.class);
+    infoServer.start();
 
-    private void doSetup() throws IOException {
-      //
-      // Create the checkpoint directory if needed. 
-      //
-      checkpointDir.mkdirs();
-      srcImage = new File(checkpointDir, SRC_FS_IMAGE);
-      destImage = new File(checkpointDir, DEST_FS_IMAGE);
-      editFile = new File(checkpointDir, FS_EDITS);
-      srcImage.delete();
-      destImage.delete();
-      editFile.delete();
+    //
+    // Initialize other scheduling parameters from the configuration
+    //
+    String[] dirName = conf.getStrings("fs.checkpoint.dir");
+    checkpointDir = new File(dirName[0]);
+    checkpointPeriod = conf.getLong("fs.checkpoint.period", 3600);
+    checkpointSize = conf.getLong("fs.checkpoint.size", 4194304);
+    doSetup();
+
+    LOG.warn("Checkpoint Directory:" + checkpointDir);
+    LOG.warn("Checkpoint Period   :" + checkpointPeriod + " secs " +
+             "(" + checkpointPeriod/60 + " min)");
+    LOG.warn("Log Size Trigger    :" + checkpointSize + " bytes " +
+             "(" + checkpointSize/1024 + " KB)");
+  }
+
+  /**
+   * Shut down this instance of the SecondaryNameNode.
+   * Returns only after shutdown is complete.
+   */
+  public void shutdown() {
+    shouldRun = false;
+    try {
+      infoServer.stop();
+    } catch (Exception e) {
     }
+  }
 
-    File getNewImage() {
-      return destImage;
-    }
+  private void doSetup() throws IOException {
+    //
+    // Create the checkpoint directory if needed. 
+    //
+    checkpointDir.mkdirs();
+    srcImage = new File(checkpointDir, SRC_FS_IMAGE);
+    destImage = new File(checkpointDir, DEST_FS_IMAGE);
+    editFile = new File(checkpointDir, FS_EDITS);
+    srcImage.delete();
+    destImage.delete();
+    editFile.delete();
+  }
+
+  File getNewImage() {
+    return destImage;
+  }
+
+  //
+  // The main work loop
+  //
+  public void run() {
 
     //
-    // The main work loop
+    // Poll the Namenode (once every 5 minutes) to find the size of the
+    // pending edit log.
     //
-    public void run() {
+    long period = 5 * 60;              // 5 minutes
+    long lastCheckpointTime = 0;
+    if (checkpointPeriod < period) {
+      period = checkpointPeriod;
+    }
 
-      //
-      // Poll the Namenode (once every 5 minutes) to find the size of the
-      // pending edit log.
-      //
-      long period = 5 * 60;              // 5 minutes
-      long lastCheckpointTime = 0;
-      if (checkpointPeriod < period) {
-        period = checkpointPeriod;
+    while (shouldRun) {
+      try {
+        Thread.sleep(1000 * period);
+      } catch (InterruptedException ie) {
+        // do nothing
+      }
+      if (!shouldRun) {
+        break;
       }
+      try {
+        long now = System.currentTimeMillis();
 
-      while (shouldRun) {
-        try {
-            Thread.sleep(1000 * period);
-        } catch (InterruptedException ie) {
-          // do nothing
-        }
-        if (!shouldRun) {
-          break;
-        }
-        try {
-          long now = System.currentTimeMillis();
-
-          long size = namenode.getEditLogSize();
-          if (size >= checkpointSize || 
-              now >= lastCheckpointTime + 1000 * checkpointPeriod) {
-            doCheckpoint();
-            lastCheckpointTime = now;
-          }
-        } catch (IOException e) {
-          LOG.error("Exception in doCheckpoint:");
-          LOG.error(StringUtils.stringifyException(e));
-          e.printStackTrace();
+        long size = namenode.getEditLogSize();
+        if (size >= checkpointSize || 
+            now >= lastCheckpointTime + 1000 * checkpointPeriod) {
+          doCheckpoint();
+          lastCheckpointTime = now;
         }
+      } catch (IOException e) {
+        LOG.error("Exception in doCheckpoint:");
+        LOG.error(StringUtils.stringifyException(e));
+        e.printStackTrace();
       }
     }
-
-    /**
-     * get the current fsimage from Namenode.
-     */
-     private void getFSImage() throws IOException {
-       String fsName = getInfoServer();
-       String fileid = "getimage=1";
-       TransferFsImage.getFileClient(fsName, fileid, srcImage);
-       LOG.info("Downloaded file " + srcImage + " size " +
-                srcImage.length() + " bytes.");
-    }
-
-    /**
-     * get the old edits file from the NameNode
-     */
-     private void getFSEdits() throws IOException {
-       String fsName = getInfoServer();
-       String fileid = "getedit=1";
-       TransferFsImage.getFileClient(fsName, fileid, editFile);
-       LOG.info("Downloaded file " + editFile + " size " +
-                editFile.length() + " bytes.");
-    }
-
-    /**
-     * Copy the new fsimage into the NameNode
-     */
-     private void putFSImage() throws IOException {
-       String fsName = getInfoServer();
-       String fileid = "putimage=1&port=" + infoPort +
-                       "&machine=" +
-                       InetAddress.getLocalHost().getHostAddress();
-       LOG.info("Posted URL " + fsName + fileid);
-       TransferFsImage.getFileClient(fsName, fileid, (File[])null);
-     }
-
-    /*
-     * Returns the host:port of the Jetty server that the Namenode web UI listens on.
-     */
-    private String getInfoServer() throws IOException {
-      String fsName = conf.get("fs.default.name", "local");
-      if (fsName.equals("local")) {
-        throw new IOException("This is not a DFS");
-      }
-      String[] splits = fsName.split(":", 2);
-      int infoPort = conf.getInt("dfs.info.port", 50070);
-      return splits[0]+":"+infoPort;
+  }
+
+  /**
+   * get the current fsimage from Namenode.
+   */
+  private void getFSImage() throws IOException {
+    String fsName = getInfoServer();
+    String fileid = "getimage=1";
+    TransferFsImage.getFileClient(fsName, fileid, srcImage);
+    LOG.info("Downloaded file " + srcImage + " size " +
+             srcImage.length() + " bytes.");
+  }
+
+  /**
+   * get the old edits file from the NameNode
+   */
+  private void getFSEdits() throws IOException {
+    String fsName = getInfoServer();
+    String fileid = "getedit=1";
+    TransferFsImage.getFileClient(fsName, fileid, editFile);
+    LOG.info("Downloaded file " + editFile + " size " +
+             editFile.length() + " bytes.");
+  }
+
+  /**
+   * Copy the new fsimage into the NameNode
+   */
+  private void putFSImage() throws IOException {
+    String fsName = getInfoServer();
+    String fileid = "putimage=1&port=" + infoPort +
+      "&machine=" +
+      InetAddress.getLocalHost().getHostAddress();
+    LOG.info("Posted URL " + fsName + fileid);
+    TransferFsImage.getFileClient(fsName, fileid, (File[])null);
+  }
+
+  /*
+   * Returns the host:port of the Jetty server that the Namenode web UI listens on.
+   */
+  private String getInfoServer() throws IOException {
+    String fsName = conf.get("fs.default.name", "local");
+    if (fsName.equals("local")) {
+      throw new IOException("This is not a DFS");
     }
+    String[] splits = fsName.split(":", 2);
+    int infoPort = conf.getInt("dfs.info.port", 50070);
+    return splits[0]+":"+infoPort;
+  }
 
-    /*
-     * Create a new checkpoint
-     */
-    void doCheckpoint() throws IOException {
+  /*
+   * Create a new checkpoint
+   */
+  void doCheckpoint() throws IOException {
 
-      //
-      // Do the required initialization of the merge work area.
-      //
-      doSetup();
+    //
+    // Do the required initialization of the merge work area.
+    //
+    doSetup();
 
-      //
-      // Tell the namenode to start logging transactions in a new edit file
-      //
-      namenode.rollEditLog();
+    //
+    // Tell the namenode to start logging transactions in a new edit file
+    //
+    namenode.rollEditLog();
 
-      //
-      // error simulation code for junit test
-      //
-      if (simulation != null && simulation[0]) {
-        throw new IOException("Simulating error0 " +
-                              "after creating edits.new");
-      }
+    //
+    // error simulation code for junit test
+    //
+    if (simulation != null && simulation[0]) {
+      throw new IOException("Simulating error0 " +
+                            "after creating edits.new");
+    }
 
-      getFSImage();                // Fetch fsimage
-      getFSEdits();                // Fetch edits
-      doMerge();                   // Do the merge
+    getFSImage();                // Fetch fsimage
+    getFSEdits();                // Fetch edits
+    doMerge();                   // Do the merge
   
-      //
-      // Upload the new image into the NameNode. Then tell the Namenode
-      // to make this newly uploaded image the most current image.
-      //
-      putFSImage();
-
-      //
-      // error simulation code for junit test
-      //
-      if (simulation != null && simulation[1]) {
-        throw new IOException("Simulating error1 " +
-                              "after uploading new image to NameNode");
-      }
-
-      namenode.rollFsImage();
+    //
+    // Upload the new image into the NameNode. Then tell the Namenode
+    // to make this newly uploaded image the most current image.
+    //
+    putFSImage();
 
-      LOG.warn("Checkpoint done. Image Size:" + srcImage.length() +
-               " Edit Size:" + editFile.length() +
-               " New Image Size:" + destImage.length());
+    //
+    // error simulation code for junit test
+    //
+    if (simulation != null && simulation[1]) {
+      throw new IOException("Simulating error1 " +
+                            "after uploading new image to NameNode");
     }
 
-    /**
-     * merges SRC_FS_IMAGE with FS_EDITS and writes the output into
-     * DEST_FS_IMAGE
-     */
-    private void doMerge() throws IOException {
-      FSNamesystem namesystem = new FSNamesystem(
-                                    new FSImage(checkpointDir));
-      FSImage fsImage = namesystem.dir.fsImage;
-      fsImage.loadFSImage(srcImage);
-      fsImage.getEditLog().loadFSEdits(editFile);
-      fsImage.saveFSImage(destImage);
+    namenode.rollFsImage();
+
+    LOG.warn("Checkpoint done. Image Size:" + srcImage.length() +
+             " Edit Size:" + editFile.length() +
+             " New Image Size:" + destImage.length());
+  }
+
+  /**
+   * merges SRC_FS_IMAGE with FS_EDITS and writes the output into
+   * DEST_FS_IMAGE
+   */
+  private void doMerge() throws IOException {
+    FSNamesystem namesystem = new FSNamesystem(
+                                               new FSImage(checkpointDir));
+    FSImage fsImage = namesystem.dir.fsImage;
+    fsImage.loadFSImage(srcImage);
+    fsImage.getEditLog().loadFSEdits(editFile);
+    fsImage.saveFSImage(destImage);
+  }
+
+  /**
+   * @param argv The parameters passed to this program.
+   * @exception Exception if the filesystem does not exist.
+   * @return 0 on success, non zero on error.
+   */
+  private int processArgs(String[] argv) throws Exception {
+
+    if (argv.length < 1) {
+      printUsage("");
+      return -1;
     }
 
-    /**
-     * @param argv The parameters passed to this program.
-     * @exception Exception if the filesystem does not exist.
-     * @return 0 on success, non zero on error.
-     */
-    private int processArgs(String[] argv) throws Exception {
+    int exitCode = -1;
+    int i = 0;
+    String cmd = argv[i++];
 
-      if (argv.length < 1) {
-          printUsage("");
-          return -1;
+    //
+    // verify that we have enough command line parameters
+    //
+    if ("-geteditsize".equals(cmd)) {
+      if (argv.length != 1) {
+        printUsage(cmd);
+        return exitCode;
       }
-
-      int exitCode = -1;
-      int i = 0;
-      String cmd = argv[i++];
-
-      //
-      // verify that we have enough command line parameters
-      //
-      if ("-geteditsize".equals(cmd)) {
-        if (argv.length != 1) {
-          printUsage(cmd);
-          return exitCode;
-        }
-      } else if ("-checkpoint".equals(cmd)) {
-        if (argv.length != 1 && argv.length != 2) {
-          printUsage(cmd);
-          return exitCode;
-        }
-        if (argv.length == 2 && !"force".equals(argv[i])) {
-          printUsage(cmd);
-          return exitCode;
-        }
+    } else if ("-checkpoint".equals(cmd)) {
+      if (argv.length != 1 && argv.length != 2) {
+        printUsage(cmd);
+        return exitCode;
       }
-
-      exitCode = 0;
-      try {
-        if ("-checkpoint".equals(cmd)) {
-          long size = namenode.getEditLogSize();
-          if (size >= checkpointSize || 
-              argv.length == 2 && "force".equals(argv[i])) {
-            doCheckpoint();
-          } else {
-            System.err.println("EditLog size " + size + " bytes is " +
-                               "smaller than configured checkpoint " +
-                               "size " + checkpointSize + " bytes.");
-            System.err.println("Skipping checkpoint.");
-          }
-        } else if ("-geteditsize".equals(cmd)) {
-          long size = namenode.getEditLogSize();
-          System.out.println("EditLog size is " + size + " bytes");
-        } else {
-          exitCode = -1;
-          LOG.error(cmd.substring(1) + ": Unknown command");
-          printUsage("");
-        }
-      } catch (RemoteException e) {
-          //
-          // This is an error returned by the hadoop server. Print
-          // out the first line of the error message and ignore the stack trace.
-          exitCode = -1;
-          try {
-            String[] content;
-            content = e.getLocalizedMessage().split("\n");
-            LOG.error(cmd.substring(1) + ": "
-                               + content[0]);
-          } catch (Exception ex) {
-            LOG.error(cmd.substring(1) + ": "
-                               + ex.getLocalizedMessage());
-          }
-        } catch (IOException e) {
-          //
-          // IO exception encountered locally.
-          //
-          exitCode = -1;
-          LOG.error(cmd.substring(1) + ": "
-                             + e.getLocalizedMessage());
-        } finally {
-            // Does the RPC connection need to be closed?
-        }
+      if (argv.length == 2 && !"force".equals(argv[i])) {
+        printUsage(cmd);
         return exitCode;
+      }
     }
 
-    /**
-     * Displays format of commands.
-     * @param cmd The command that is being executed.
-     */
-    private void printUsage(String cmd) {
-      if ("-geteditsize".equals(cmd)) {
-        System.err.println("Usage: java SecondaryNameNode"
-                           + " [-geteditsize]");
-      } else if ("-checkpoint".equals(cmd)) {
-        System.err.println("Usage: java SecondaryNameNode"
-                           + " [-checkpoint [force]]");
+    exitCode = 0;
+    try {
+      if ("-checkpoint".equals(cmd)) {
+        long size = namenode.getEditLogSize();
+        if (size >= checkpointSize || 
+            argv.length == 2 && "force".equals(argv[i])) {
+          doCheckpoint();
+        } else {
+          System.err.println("EditLog size " + size + " bytes is " +
+                             "smaller than configured checkpoint " +
+                             "size " + checkpointSize + " bytes.");
+          System.err.println("Skipping checkpoint.");
+        }
+      } else if ("-geteditsize".equals(cmd)) {
+        long size = namenode.getEditLogSize();
+        System.out.println("EditLog size is " + size + " bytes");
       } else {
-        System.err.println("Usage: java SecondaryNameNode " +
-                           "[-checkpoint [force]] " +
-                           "[-geteditsize] ");
+        exitCode = -1;
+        LOG.error(cmd.substring(1) + ": Unknown command");
+        printUsage("");
       }
-    }
-
-    //
-    // utility method to facilitate junit test error simulation
-    //
-    void initializeErrorSimulationEvent(int numberOfEvents) {
-      simulation = new boolean[numberOfEvents]; 
-      for (int i = 0; i < numberOfEvents; i++) {
-        simulation[i] = false;
+    } catch (RemoteException e) {
+      //
+      // This is an error returned by the hadoop server. Print
+      // out the first line of the error message and ignore the stack trace.
+      exitCode = -1;
+      try {
+        String[] content;
+        content = e.getLocalizedMessage().split("\n");
+        LOG.error(cmd.substring(1) + ": "
+                  + content[0]);
+      } catch (Exception ex) {
+        LOG.error(cmd.substring(1) + ": "
+                  + ex.getLocalizedMessage());
       }
+    } catch (IOException e) {
+      //
+      // IO exception encountered locally.
+      //
+      exitCode = -1;
+      LOG.error(cmd.substring(1) + ": "
+                + e.getLocalizedMessage());
+    } finally {
+      // Does the RPC connection need to be closed?
     }
-
-    void setErrorSimulation(int index) {
-      assert(index < simulation.length);
-      simulation[index] = true;
+    return exitCode;
+  }
+
+  /**
+   * Displays format of commands.
+   * @param cmd The command that is being executed.
+   */
+  private void printUsage(String cmd) {
+    if ("-geteditsize".equals(cmd)) {
+      System.err.println("Usage: java SecondaryNameNode"
+                         + " [-geteditsize]");
+    } else if ("-checkpoint".equals(cmd)) {
+      System.err.println("Usage: java SecondaryNameNode"
+                         + " [-checkpoint [force]]");
+    } else {
+      System.err.println("Usage: java SecondaryNameNode " +
+                         "[-checkpoint [force]] " +
+                         "[-geteditsize] ");
     }
-
-    void clearErrorSimulation(int index) {
-      assert(index < simulation.length);
-      simulation[index] = false;
+  }
+
+  //
+  // utility method to facilitate junit test error simulation
+  //
+  void initializeErrorSimulationEvent(int numberOfEvents) {
+    simulation = new boolean[numberOfEvents]; 
+    for (int i = 0; i < numberOfEvents; i++) {
+      simulation[i] = false;
     }
-
-   /**
-     * This class is used in Namesystem's jetty to retrieve a file.
-     * Typically used by the Secondary NameNode to retrieve image and
-     * edit file for periodic checkpointing.
-     * @author Dhruba Borthakur
-     */
-    public static class GetImageServlet extends HttpServlet {
-      public void doGet(HttpServletRequest request,
-          HttpServletResponse response
-          ) throws ServletException, IOException {
-        Map<String,String[]> pmap = request.getParameterMap();
-        try {
-          ServletContext context = getServletContext();
-          SecondaryNameNode nn = (SecondaryNameNode) 
-                                  context.getAttribute("name.secondary");
-          TransferFsImage ff = new TransferFsImage(pmap, request, response);
-          if (ff.getImage()) {
-            TransferFsImage.getFileServer(response.getOutputStream(),
-                                   nn.getNewImage());
-          }
-          LOG.info("New Image " + nn.getNewImage() + " retrieved by Namenode.");
-        } catch (IOException ie) {
-          StringUtils.stringifyException(ie);
-          LOG.error(ie);
-          String errMsg = "GetImage failed.";
-          response.sendError(HttpServletResponse.SC_GONE, errMsg);
-          throw ie;
-
+  }
+
+  void setErrorSimulation(int index) {
+    assert(index < simulation.length);
+    simulation[index] = true;
+  }
+
+  void clearErrorSimulation(int index) {
+    assert(index < simulation.length);
+    simulation[index] = false;
+  }
+
+  /**
+   * This class is used in Namesystem's jetty to retrieve a file.
+   * Typically used by the Secondary NameNode to retrieve image and
+   * edit file for periodic checkpointing.
+   * @author Dhruba Borthakur
+   */
+  public static class GetImageServlet extends HttpServlet {
+    public void doGet(HttpServletRequest request,
+                      HttpServletResponse response
+                      ) throws ServletException, IOException {
+      Map<String,String[]> pmap = request.getParameterMap();
+      try {
+        ServletContext context = getServletContext();
+        SecondaryNameNode nn = (SecondaryNameNode) 
+          context.getAttribute("name.secondary");
+        TransferFsImage ff = new TransferFsImage(pmap, request, response);
+        if (ff.getImage()) {
+          TransferFsImage.getFileServer(response.getOutputStream(),
+                                        nn.getNewImage());
         }
+        LOG.info("New Image " + nn.getNewImage() + " retrieved by Namenode.");
+      } catch (IOException ie) {
+        StringUtils.stringifyException(ie);
+        LOG.error(ie);
+        String errMsg = "GetImage failed.";
+        response.sendError(HttpServletResponse.SC_GONE, errMsg);
+        throw ie;
+
       }
     }
-
-    /**
-     * main() has some simple utility methods.
-     * @param argv Command line parameters.
-     * @exception Exception if the filesystem does not exist.
-     */
-    public static void main(String[] argv) throws Exception {
-        Configuration tconf = new Configuration();
-        if (argv.length >= 1) {
-          SecondaryNameNode secondary = new SecondaryNameNode(tconf);
-          int ret = secondary.processArgs(argv);
-          System.exit(ret);
-        }
-
-        // Create a never-ending daemon
-        Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
-        checkpointThread.start();
+  }
+
+  /**
+   * main() has some simple utility methods.
+   * @param argv Command line parameters.
+   * @exception Exception if the filesystem does not exist.
+   */
+  public static void main(String[] argv) throws Exception {
+    Configuration tconf = new Configuration();
+    if (argv.length >= 1) {
+      SecondaryNameNode secondary = new SecondaryNameNode(tconf);
+      int ret = secondary.processArgs(argv);
+      System.exit(ret);
     }
+
+    // Create a never-ending daemon
+    Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf)); 
+    checkpointThread.start();
+  }
 }
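
The constructor and run() loop above define the checkpoint schedule: the secondary polls the namenode and triggers doCheckpoint() once fs.checkpoint.period seconds have elapsed or the edit log exceeds fs.checkpoint.size bytes. A small sketch of wiring that up follows; the checkpoint directory, the tightened period/size values, and the sketch class itself are illustrative assumptions (the defaults are 3600 seconds and 4194304 bytes, as read above).

package org.apache.hadoop.dfs;   // assumed, mirroring the class above

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.Daemon;   // assumed location of the Daemon used by main()

public class SecondaryNameNodeSketch {
  public static void startCheckpointer() throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.checkpoint.dir", "/tmp/hadoop/namesecondary");   // hypothetical path
    conf.set("fs.checkpoint.period", "600");      // checkpoint at least every 10 minutes
    conf.set("fs.checkpoint.size", "1048576");    // ...or once the edit log reaches 1 MB
    // Same never-ending daemon that main() creates when run without arguments.
    new Daemon(new SecondaryNameNode(conf)).start();
  }
}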

+ 2 - 2
src/java/org/apache/hadoop/dfs/StreamFile.java

@@ -41,7 +41,7 @@ public class StreamFile extends HttpServlet {
     }
   }
   public void doGet(HttpServletRequest request, HttpServletResponse response)
-                                     throws ServletException, IOException {
+    throws ServletException, IOException {
     String filename = request.getParameter("filename");
     if (filename == null || filename.length() == 0) {
       response.setContentType("text/plain");
@@ -53,7 +53,7 @@ public class StreamFile extends HttpServlet {
     FSInputStream in = dfs.open(new UTF8(filename));
     OutputStream os = response.getOutputStream();
     response.setHeader("Content-Disposition", "attachment; filename=\"" + 
-                        filename + "\"");
+                       filename + "\"");
     response.setContentType("application/octet-stream");
     byte buf[] = new byte[4096];
     try {

+ 44 - 44
src/java/org/apache/hadoop/dfs/TransferFsImage.java

@@ -107,7 +107,7 @@ class TransferFsImage implements FSConstants {
    * Copies the contents of the local file into the output stream.
    */
   static void getFileServer(OutputStream outstream, File localfile) 
-                                                 throws IOException {
+    throws IOException {
     byte buf[] = new byte[BUFFER_SIZE];
     FileInputStream infile = null;
     try {
@@ -132,54 +132,54 @@ class TransferFsImage implements FSConstants {
   * Client-side method to fetch a file from a server.
    * Copies the response from the URL to a list of local files.
    */
-   static void getFileClient(String fsName, String id, File[] localPath)
-                             throws IOException {
-     byte[] buf = new byte[BUFFER_SIZE];
-     StringBuffer str = new StringBuffer("http://"+fsName+"/getimage?");
-     str.append(id);
+  static void getFileClient(String fsName, String id, File[] localPath)
+    throws IOException {
+    byte[] buf = new byte[BUFFER_SIZE];
+    StringBuffer str = new StringBuffer("http://"+fsName+"/getimage?");
+    str.append(id);
 
-     //
-     // open connection to remote server
-     //
-     URL url = new URL(str.toString());
-     URLConnection connection = url.openConnection();
-     InputStream stream = connection.getInputStream();
-     FileOutputStream[] output = null;
-     if (localPath != null) {
-       output = new FileOutputStream[localPath.length];
-       for (int i = 0; i < output.length; i++) {
-         output[i] = new FileOutputStream(localPath[i]);
-       }
-     }
+    //
+    // open connection to remote server
+    //
+    URL url = new URL(str.toString());
+    URLConnection connection = url.openConnection();
+    InputStream stream = connection.getInputStream();
+    FileOutputStream[] output = null;
+    if (localPath != null) {
+      output = new FileOutputStream[localPath.length];
+      for (int i = 0; i < output.length; i++) {
+        output[i] = new FileOutputStream(localPath[i]);
+      }
+    }
 
-     try {
-       int num = 1;
-       while (num > 0) {
-         num = stream.read(buf);
-         if (num > 0 && localPath != null) {
-           for (int i = 0; i < output.length; i++) {
-             output[i].write(buf, 0, num);
-           }
-         }
-       }
-     } finally {
-       stream.close();
-       if (localPath != null) {
-         for (int i = 0; i < output.length; i++) {
-           output[i].close();
-         }
-       }
-     }
-   }
+    try {
+      int num = 1;
+      while (num > 0) {
+        num = stream.read(buf);
+        if (num > 0 && localPath != null) {
+          for (int i = 0; i < output.length; i++) {
+            output[i].write(buf, 0, num);
+          }
+        }
+      }
+    } finally {
+      stream.close();
+      if (localPath != null) {
+        for (int i = 0; i < output.length; i++) {
+          output[i].close();
+        }
+      }
+    }
+  }
 
   /**
   * Client-side method to fetch a file from a server.
    * Copies the response from the URL to the local file.
    */
-   static void getFileClient(String fsName, String id, File localPath)
-                             throws IOException {
-     File[] filelist = new File[1];
-     filelist[0] = localPath;
-     getFileClient(fsName, id, filelist);
-   }
+  static void getFileClient(String fsName, String id, File localPath)
+    throws IOException {
+    File[] filelist = new File[1];
+    filelist[0] = localPath;
+    getFileClient(fsName, id, filelist);
+  }
 }
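
getFileClient() above simply opens http://<fsName>/getimage?<id> and streams the response into one or more local files; getFileServer() is its server-side counterpart. A short usage sketch in the style of SecondaryNameNode.getFSImage() follows; the host name and destination file are hypothetical, while the "getimage=1" id and the 50070 default info port come from code earlier in this change.

package org.apache.hadoop.dfs;   // assumed, so the package-private helper is visible

import java.io.File;

public class TransferFsImageSketch {
  public static void fetchImage() throws Exception {
    String fsName = "namenode.example.com:50070";   // hypothetical namenode web address
    File local = new File("/tmp/fsimage.copy");     // hypothetical destination
    // Copies http://namenode.example.com:50070/getimage?getimage=1 into the local file.
    TransferFsImage.getFileClient(fsName, "getimage=1", local);
  }
}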

+ 114 - 114
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -63,7 +63,7 @@ public class DistributedCache {
    * @throws IOException
    */
   public static Path getLocalCache(URI cache, Configuration conf, Path baseDir,
-      boolean isArchive, String md5, Path currentWorkDir) throws IOException {
+                                   boolean isArchive, String md5, Path currentWorkDir) throws IOException {
     String cacheId = makeRelative(cache, conf);
     CacheStatus lcacheStatus;
     Path localizedPath;
@@ -105,7 +105,7 @@ public class DistributedCache {
    * @throws IOException
    */
   public static void releaseCache(URI cache, Configuration conf)
-      throws IOException {
+    throws IOException {
     String cacheId = makeRelative(cache, conf);
     synchronized (cachedArchives) {
       CacheStatus lcacheStatus = (CacheStatus) cachedArchives.get(cacheId);
@@ -143,7 +143,7 @@ public class DistributedCache {
    * on/absolute_path
    */
   private static String makeRelative(URI cache, Configuration conf)
-      throws IOException {
+    throws IOException {
     String fsname = cache.getScheme();
     String path;
     FileSystem dfs = FileSystem.get(conf);
@@ -162,7 +162,7 @@ public class DistributedCache {
 
   // the method which actually copies the caches locally and unjars/unzips them
   private static Path localizeCache(URI cache, CacheStatus cacheStatus,
-      Configuration conf, boolean isArchive, String md5, Path currentWorkDir) throws IOException {
+                                    Configuration conf, boolean isArchive, String md5, Path currentWorkDir) throws IOException {
     boolean b = true;
     boolean doSymlink = getSymlink(conf);
     FileSystem dfs = getFileSystem(cache, conf);
@@ -174,7 +174,7 @@ public class DistributedCache {
         if (doSymlink){
           if (!flink.exists())
             FileUtil.symLink(cacheStatus.localLoadPath.toString(), 
-                link);
+                             link);
         }
         return cacheStatus.localLoadPath;
       }
@@ -182,7 +182,7 @@ public class DistributedCache {
         if (doSymlink){
           if (!flink.exists())
             FileUtil.symLink(cacheFilePath(cacheStatus.localLoadPath).toString(), 
-              link);
+                             link);
         }
         return cacheFilePath(cacheStatus.localLoadPath);
       }
@@ -193,29 +193,29 @@ public class DistributedCache {
       // return null
       if (cacheStatus.refcount > 1 && (cacheStatus.currentStatus == true))
         throw new IOException("Cache " + cacheStatus.localLoadPath.toString()
-            + " is in use and cannot be refreshed");
+                              + " is in use and cannot be refreshed");
       byte[] checkSum = createMD5(cache, conf);
       FileSystem localFs = FileSystem.getLocal(conf);
       localFs.delete(cacheStatus.localLoadPath);
       Path parchive = new Path(cacheStatus.localLoadPath,
                                new Path(cacheStatus.localLoadPath.getName()));
       if (!localFs.mkdirs(cacheStatus.localLoadPath)) {
-          throw new IOException("Mkdirs failed to create directory " + 
-                                cacheStatus.localLoadPath.toString());
+        throw new IOException("Mkdirs failed to create directory " + 
+                              cacheStatus.localLoadPath.toString());
       }
       String cacheId = cache.getPath();
       dfs.copyToLocalFile(new Path(cacheId), parchive);
       dfs.copyToLocalFile(new Path(cacheId + "_md5"), new Path(parchive
-          .toString()
-          + "_md5"));
+                                                               .toString()
+                                                               + "_md5"));
       if (isArchive) {
         String tmpArchive = parchive.toString().toLowerCase();
         if (tmpArchive.endsWith(".jar")) {
           RunJar.unJar(new File(parchive.toString()), new File(parchive
-              .getParent().toString()));
+                                                               .getParent().toString()));
         } else if (tmpArchive.endsWith(".zip")) {
           FileUtil.unZip(new File(parchive.toString()), new File(parchive
-              .getParent().toString()));
+                                                                 .getParent().toString()));
 
         }
         // else will not do anything
@@ -231,7 +231,7 @@ public class DistributedCache {
       if (doSymlink){
         if (!flink.exists())
           FileUtil.symLink(cacheStatus.localLoadPath.toString(), 
-            link);
+                           link);
       }
       return cacheStatus.localLoadPath;
     }
@@ -239,7 +239,7 @@ public class DistributedCache {
       if (doSymlink){
         if (!flink.exists())
           FileUtil.symLink(cacheFilePath(cacheStatus.localLoadPath).toString(), 
-            link);
+                           link);
       }
       return cacheFilePath(cacheStatus.localLoadPath);
     }
@@ -247,7 +247,7 @@ public class DistributedCache {
 
   // Checks if the cache has already been localized and is fresh
   private static boolean ifExistsAndFresh(CacheStatus lcacheStatus, URI cache,
-      FileSystem dfs, String confMD5, Configuration conf) throws IOException {
+                                          FileSystem dfs, String confMD5, Configuration conf) throws IOException {
     // compute the md5 of the crc
     byte[] digest = null;
     byte[] fsDigest = createMD5(cache, conf);
@@ -259,7 +259,7 @@ public class DistributedCache {
       digest = lcacheStatus.md5;
       if (!MessageDigest.isEqual(confDigest, fsDigest)) {
        throw new IOException("Inconsistency in data caching, "
-            + "Cache archives have been changed");
+                              + "Cache archives have been changed");
       } else {
         if (!MessageDigest.isEqual(confDigest, digest)) {
           // needs refreshing
@@ -283,19 +283,19 @@ public class DistributedCache {
    * @throws IOException
    */
   public static byte[] createMD5(URI cache, Configuration conf)
-      throws IOException {
+    throws IOException {
     byte[] b = new byte[CRC_BUFFER_SIZE];
     byte[] digest = null;
 
     FileSystem fileSystem = getFileSystem(cache, conf);
     if(!(fileSystem instanceof ChecksumFileSystem)) {
-        throw new IOException( "Not a checksummed file system: "
-                +fileSystem.getUri() );
+      throw new IOException( "Not a checksummed file system: "
+                             +fileSystem.getUri() );
     }
     String filename = cache.getPath();
     Path filePath = new Path(filename);
     Path md5File = new Path(filePath.getParent().toString() + Path.SEPARATOR
-        + filePath.getName() + "_md5");
+                            + filePath.getName() + "_md5");
     MessageDigest md5 = null;
     try {
       md5 = MessageDigest.getInstance("MD5");
@@ -305,13 +305,13 @@ public class DistributedCache {
     if (!fileSystem.exists(md5File)) {
       ChecksumFileSystem checksumFs;
       if(!(fileSystem instanceof ChecksumFileSystem)) {
-          throw new IOException(
-                  "Not a checksummed file system: "+fileSystem.getUri());
+        throw new IOException(
+                              "Not a checksummed file system: "+fileSystem.getUri());
       } else {
-          checksumFs = (ChecksumFileSystem)fileSystem;
+        checksumFs = (ChecksumFileSystem)fileSystem;
       }
       FSDataInputStream fsStream = checksumFs.getRawFileSystem().open(
-              checksumFs.getChecksumFile(filePath));
+                                                                      checksumFs.getChecksumFile(filePath));
       int read = fsStream.read(b);
       while (read != -1) {
         md5.update(b, 0, read);
@@ -343,18 +343,18 @@ public class DistributedCache {
    * @throws IOException
    */
   public static void createAllSymlink(Configuration conf, File jobCacheDir, File workDir)
-  throws IOException{
+    throws IOException{
     if ((!jobCacheDir.isDirectory()) || (!workDir.isDirectory())){
       return;
     }
     boolean createSymlink = getSymlink(conf);
-     if (createSymlink){
-       File[] list = jobCacheDir.listFiles();
-       for (int i=0; i < list.length; i++){
-         FileUtil.symLink(list[i].getAbsolutePath(),
-             new File(workDir, list[i].getName()).toString());
-       }
-     }  
+    if (createSymlink){
+      File[] list = jobCacheDir.listFiles();
+      for (int i=0; i < list.length; i++){
+        FileUtil.symLink(list[i].getAbsolutePath(),
+                         new File(workDir, list[i].getName()).toString());
+      }
+    }  
   }
   
   private static String getFileSysName(URI url) {
@@ -369,7 +369,7 @@ public class DistributedCache {
   }
 
   private static FileSystem getFileSystem(URI cache, Configuration conf)
-      throws IOException {
+    throws IOException {
     String fileSysName = getFileSysName(cache);
     if (fileSysName != null)
       return FileSystem.getNamed(fileSysName, conf);
@@ -425,9 +425,9 @@ public class DistributedCache {
    * @throws IOException
    */
   public static Path[] getLocalCacheArchives(Configuration conf)
-      throws IOException {
+    throws IOException {
     return StringUtils.stringToPath(conf
-        .getStrings("mapred.cache.localArchives"));
+                                    .getStrings("mapred.cache.localArchives"));
   }
 
   /**
@@ -437,7 +437,7 @@ public class DistributedCache {
    * @throws IOException
    */
   public static Path[] getLocalCacheFiles(Configuration conf)
-      throws IOException {
+    throws IOException {
     return StringUtils.stringToPath(conf.getStrings("mapred.cache.localFiles"));
   }
 
@@ -508,7 +508,7 @@ public class DistributedCache {
   public static void addCacheArchive(URI uri, Configuration conf) {
     String archives = conf.get("mapred.cache.archives");
     conf.set("mapred.cache.archives", archives == null ? uri.toString()
-        : archives + "," + uri.toString());
+             : archives + "," + uri.toString());
   }
   
   /**
@@ -519,81 +519,81 @@ public class DistributedCache {
   public static void addCacheFile(URI uri, Configuration conf) {
     String files = conf.get("mapred.cache.files");
     conf.set("mapred.cache.files", files == null ? uri.toString() : files + ","
-        + uri.toString());
-  }
-
-	/**
-	 * Add a file path to the current set of classpath entries. It adds the file
-	 * to the cache as well.
-	 * 
-	 * @param file Path of the file to be added
-	 * @param conf Configuration that contains the classpath setting
-	 */
-	public static void addFileToClassPath(Path file, Configuration conf)
-			throws IOException {
-		String classpath = conf.get("mapred.job.classpath.files");
-		conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
-				: classpath + System.getProperty("path.separator") + file.toString());
-		FileSystem fs = FileSystem.get(conf);
-		URI uri = fs.makeQualified(file).toUri();
-
-		addCacheFile(uri, conf);
-	}
-
-	/**
-	 * Get the file entries in classpath as an array of Path
-	 * 
-	 * @param conf Configuration that contains the classpath setting
-	 */
-	public static Path[] getFileClassPaths(Configuration conf) {
-		String classpath = conf.get("mapred.job.classpath.files");
-		if (classpath == null)
-			return null;
-		ArrayList list = Collections.list(new StringTokenizer(classpath, System
-				.getProperty("path.separator")));
-		Path[] paths = new Path[list.size()];
-		for (int i = 0; i < list.size(); i++) {
-			paths[i] = new Path((String) list.get(i));
-		}
-		return paths;
-	}
-
-	/**
-	 * Add an archive path to the current set of classpath entries. It adds the
-	 * archive to cache as well.
-	 * 
-	 * @param archive Path of the archive to be added
-	 * @param conf Configuration that contains the classpath setting
-	 */
-	public static void addArchiveToClassPath(Path archive, Configuration conf)
-			throws IOException {
-		String classpath = conf.get("mapred.job.classpath.archives");
-		conf.set("mapred.job.classpath.archives", classpath == null ? archive
-				.toString() : classpath + System.getProperty("path.separator")
-				+ archive.toString());
-		FileSystem fs = FileSystem.get(conf);
-		URI uri = fs.makeQualified(archive).toUri();
-
-		addCacheArchive(uri, conf);
-	}
-
-	/**
-	 * Get the archive entries in classpath as an array of Path
-	 * 
-	 * @param conf Configuration that contains the classpath setting
-	 */
-	public static Path[] getArchiveClassPaths(Configuration conf) {
-		String classpath = conf.get("mapred.job.classpath.archives");
-		if (classpath == null)
-			return null;
-		ArrayList list = Collections.list(new StringTokenizer(classpath, System
-				.getProperty("path.separator")));
-		Path[] paths = new Path[list.size()];
-		for (int i = 0; i < list.size(); i++) {
-			paths[i] = new Path((String) list.get(i));
-		}
-		return paths;
-	}
+             + uri.toString());
+  }
+
+  /**
+   * Add an file path to the current set of classpath entries It adds the file
+   * to cache as well.
+   * 
+   * @param file Path of the file to be added
+   * @param conf Configuration that contains the classpath setting
+   */
+  public static void addFileToClassPath(Path file, Configuration conf)
+    throws IOException {
+    String classpath = conf.get("mapred.job.classpath.files");
+    conf.set("mapred.job.classpath.files", classpath == null ? file.toString()
+             : classpath + System.getProperty("path.separator") + file.toString());
+    FileSystem fs = FileSystem.get(conf);
+    URI uri = fs.makeQualified(file).toUri();
+
+    addCacheFile(uri, conf);
+  }
+
+  /**
+   * Get the file entries in classpath as an array of Path
+   * 
+   * @param conf Configuration that contains the classpath setting
+   */
+  public static Path[] getFileClassPaths(Configuration conf) {
+    String classpath = conf.get("mapred.job.classpath.files");
+    if (classpath == null)
+      return null;
+    ArrayList list = Collections.list(new StringTokenizer(classpath, System
+                                                          .getProperty("path.separator")));
+    Path[] paths = new Path[list.size()];
+    for (int i = 0; i < list.size(); i++) {
+      paths[i] = new Path((String) list.get(i));
+    }
+    return paths;
+  }
+
+  /**
+   * Add an archive path to the current set of classpath entries. It adds the
+   * archive to cache as well.
+   * 
+   * @param archive Path of the archive to be added
+   * @param conf Configuration that contains the classpath setting
+   */
+  public static void addArchiveToClassPath(Path archive, Configuration conf)
+    throws IOException {
+    String classpath = conf.get("mapred.job.classpath.archives");
+    conf.set("mapred.job.classpath.archives", classpath == null ? archive
+             .toString() : classpath + System.getProperty("path.separator")
+             + archive.toString());
+    FileSystem fs = FileSystem.get(conf);
+    URI uri = fs.makeQualified(archive).toUri();
+
+    addCacheArchive(uri, conf);
+  }
+
+  /**
+   * Get the archive entries in classpath as an array of Path
+   * 
+   * @param conf Configuration that contains the classpath setting
+   */
+  public static Path[] getArchiveClassPaths(Configuration conf) {
+    String classpath = conf.get("mapred.job.classpath.archives");
+    if (classpath == null)
+      return null;
+    ArrayList list = Collections.list(new StringTokenizer(classpath, System
+                                                          .getProperty("path.separator")));
+    Path[] paths = new Path[list.size()];
+    for (int i = 0; i < list.size(); i++) {
+      paths[i] = new Path((String) list.get(i));
+    }
+    return paths;
+  }
 
   /**
    * This method allows you to create symlinks in the current working directory
@@ -655,7 +655,7 @@ public class DistributedCache {
               if (frag3 == null)
                 return false;
               if (frag2.equalsIgnoreCase(frag3))
-                  return false;
+                return false;
             }
           }
         }
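
For readers following the re-indented classpath helpers above, a minimal, hypothetical usage sketch from a job submitter's point of view. The paths are illustrative only and are not part of this change; for a real job they would have to exist on the job's FileSystem.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.fs.Path;

    public class CacheClassPathExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();

        // Ship a jar and an archive through the cache and put both on the task classpath.
        DistributedCache.addFileToClassPath(new Path("/libs/helper.jar"), conf);
        DistributedCache.addArchiveToClassPath(new Path("/libs/extra-classes.zip"), conf);

        // The entries can later be read back as Path arrays (null when nothing was added).
        Path[] files = DistributedCache.getFileClassPaths(conf);
        Path[] archives = DistributedCache.getArchiveClassPaths(conf);
        if (files != null) {
          for (Path p : files) {
            System.out.println("classpath file: " + p);
          }
        }
        if (archives != null) {
          for (Path p : archives) {
            System.out.println("classpath archive: " + p);
          }
        }
      }
    }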

+ 9 - 9
src/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -217,7 +217,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
             LOG.info("Found checksum error: "+StringUtils.stringifyException(ce));
             long errPos = ce.getPos();
             boolean shouldRetry = fs.reportChecksumFailure(
-                file, datas, errPos, sums, errPos/bytesPerSum);
+                                                           file, datas, errPos, sums, errPos/bytesPerSum);
             if (!shouldRetry || retriesLeft == 0) {
               throw ce;
             }
@@ -226,14 +226,14 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
             datas.seek(oldPos);
             
             if (seekToNewSource(oldPos)) {
-                // Since at least one of the sources is different, 
-                // the read might succeed, so we'll retry.
-                retry = true;
+              // Since at least one of the sources is different, 
+              // the read might succeed, so we'll retry.
+              retry = true;
             } else {
-                // Neither the data stream nor the checksum stream are being read
-                // from different sources, meaning we'll still get a checksum error 
-                // if we try to do the read again.  We throw an exception instead.
-                throw ce;
+              // Neither the data stream nor the checksum stream are being read
+              // from different sources, meaning we'll still get a checksum error 
+              // if we try to do the read again.  We throw an exception instead.
+              throw ce;
             }
           }
         }
@@ -636,7 +636,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @return if retry is neccessary
    */
   public boolean reportChecksumFailure(Path f, FSDataInputStream in,
-                                             long inPos, FSDataInputStream sums, long sumsPos) {
+                                       long inPos, FSDataInputStream sums, long sumsPos) {
     return false;
   }
 }
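
The retry loop above passes errPos/bytesPerSum to reportChecksumFailure because each stored CRC covers bytesPerSum bytes of data (commonly 512, the usual io.bytes.per.checksum value). A small standalone illustration of that mapping, not Hadoop code:

    public class ChecksumChunkMath {
      public static void main(String[] args) {
        int bytesPerSum = 512;                     // assumed typical io.bytes.per.checksum value
        long[] errorPositions = { 0, 511, 512, 70000 };
        for (long errPos : errorPositions) {
          long chunk = errPos / bytesPerSum;       // the checksum chunk that failed
          long chunkStart = chunk * bytesPerSum;   // first data byte covered by that CRC
          System.out.println("error at byte " + errPos
                             + " -> chunk " + chunk
                             + " (bytes " + chunkStart + ".."
                             + (chunkStart + bytesPerSum - 1) + ")");
        }
      }
    }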

+ 3 - 3
src/java/org/apache/hadoop/fs/DF.java

@@ -62,11 +62,11 @@ public class DF {
     try {
       if (process.waitFor() != 0) {
         throw new IOException
-        (new BufferedReader(new InputStreamReader(process.getErrorStream()))
-         .readLine());
+          (new BufferedReader(new InputStreamReader(process.getErrorStream()))
+           .readLine());
       }
       parseExecResult(
-        new BufferedReader(new InputStreamReader(process.getInputStream())));
+                      new BufferedReader(new InputStreamReader(process.getInputStream())));
     } catch (InterruptedException e) {
       throw new IOException(e.toString());
     } finally {
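
The DF change above only re-wraps long lines; the underlying pattern is: run the external command, turn the first stderr line into the IOException message when the exit code is non-zero, and hand stdout to a parser otherwise. A self-contained sketch of that pattern (assumes a Unix-style df on the PATH; this is not Hadoop's DF itself):

    import java.io.BufferedReader;
    import java.io.IOException;
    import java.io.InputStreamReader;

    public class ExecAndParse {
      public static void main(String[] args) throws IOException, InterruptedException {
        Process process = Runtime.getRuntime().exec(new String[] {"df", "-k", "."});
        if (process.waitFor() != 0) {
          // Surface the command's first stderr line as the error message.
          String err = new BufferedReader(
              new InputStreamReader(process.getErrorStream())).readLine();
          throw new IOException(err);
        }
        // On success, stdout carries the fields a caller such as DF would parse.
        BufferedReader out = new BufferedReader(
            new InputStreamReader(process.getInputStream()));
        String line;
        while ((line = out.readLine()) != null) {
          System.out.println("df output: " + line);
        }
      }
    }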

+ 37 - 37
src/java/org/apache/hadoop/fs/FSInputStream.java

@@ -27,53 +27,53 @@ import java.io.*;
  *****************************************************************/
 public abstract class FSInputStream extends InputStream
     implements Seekable, PositionedReadable {
-    /**
-     * Seek to the given offset from the start of the file.
-     * The next read() will be from that location.  Can't
-     * seek past the end of the file.
-     */
-    public abstract void seek(long pos) throws IOException;
+  /**
+   * Seek to the given offset from the start of the file.
+   * The next read() will be from that location.  Can't
+   * seek past the end of the file.
+   */
+  public abstract void seek(long pos) throws IOException;
 
-    /**
-     * Return the current offset from the start of the file
-     */
-    public abstract long getPos() throws IOException;
+  /**
+   * Return the current offset from the start of the file
+   */
+  public abstract long getPos() throws IOException;
 
-    /**
-     * Seeks a different copy of the data.  Returns true if 
-     * found a new source, false otherwise.
-     */
-    public abstract boolean seekToNewSource(long targetPos) throws IOException;
+  /**
+   * Seeks a different copy of the data.  Returns true if 
+   * found a new source, false otherwise.
+   */
+  public abstract boolean seekToNewSource(long targetPos) throws IOException;
 
-    public int read(long position, byte[] buffer, int offset, int length)
+  public int read(long position, byte[] buffer, int offset, int length)
     throws IOException {
-      synchronized (this) {
-        long oldPos = getPos();
-        int nread = -1;
-        try {
-          seek(position);
-          nread = read(buffer, offset, length);
-        } finally {
-          seek(oldPos);
-        }
-        return nread;
+    synchronized (this) {
+      long oldPos = getPos();
+      int nread = -1;
+      try {
+        seek(position);
+        nread = read(buffer, offset, length);
+      } finally {
+        seek(oldPos);
       }
+      return nread;
     }
+  }
     
-    public void readFully(long position, byte[] buffer, int offset, int length)
+  public void readFully(long position, byte[] buffer, int offset, int length)
     throws IOException {
-      int nread = 0;
-      while (nread < length) {
-        int nbytes = read(position+nread, buffer, offset+nread, length-nread);
-        if (nbytes < 0) {
-          throw new EOFException("End of file reached before reading fully.");
-        }
-        nread += nbytes;
+    int nread = 0;
+    while (nread < length) {
+      int nbytes = read(position+nread, buffer, offset+nread, length-nread);
+      if (nbytes < 0) {
+        throw new EOFException("End of file reached before reading fully.");
       }
+      nread += nbytes;
     }
+  }
     
-    public void readFully(long position, byte[] buffer)
+  public void readFully(long position, byte[] buffer)
     throws IOException {
-      readFully(position, buffer, 0, buffer.length);
-    }
+    readFully(position, buffer, 0, buffer.length);
+  }
 }
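
To see how the positioned-read defaults above compose, here is a toy in-memory FSInputStream (illustrative only, not part of Hadoop): readFully() repeatedly calls the single-shot pread, which saves the current offset, seeks, reads, and seeks back, so the stream position is unchanged afterwards.

    import java.io.IOException;
    import org.apache.hadoop.fs.FSInputStream;

    public class ByteArraySeekableStream extends FSInputStream {
      private final byte[] data;
      private int pos = 0;

      public ByteArraySeekableStream(byte[] data) { this.data = data; }

      public void seek(long newPos) throws IOException {
        if (newPos < 0 || newPos > data.length) {
          throw new IOException("cannot seek to " + newPos);
        }
        pos = (int) newPos;
      }

      public long getPos() { return pos; }

      public boolean seekToNewSource(long targetPos) { return false; }  // single copy only

      public int read() {
        return pos < data.length ? (data[pos++] & 0xff) : -1;
      }

      public static void main(String[] args) throws IOException {
        ByteArraySeekableStream in =
          new ByteArraySeekableStream("hello, positioned reads".getBytes());
        byte[] buf = new byte[10];
        in.readFully(7, buf, 0, buf.length);   // reads "positioned" without moving pos
        System.out.println(new String(buf) + " / pos is still " + in.getPos());
      }
    }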

+ 42 - 42
src/java/org/apache/hadoop/fs/FileUtil.java

@@ -68,42 +68,42 @@ public class FileUtil {
                              FileSystem dstFS, Path dst, 
                              boolean deleteSource,
                              Configuration conf ) throws IOException {
-      dst = checkDest(src.getName(), dstFS, dst);
+    dst = checkDest(src.getName(), dstFS, dst);
 
-      if (srcFS.isDirectory(src)) {
-        if (!dstFS.mkdirs(dst)) {
-          return false;
-        }
-        Path contents[] = srcFS.listPaths(src);
-        for (int i = 0; i < contents.length; i++) {
-          copy(srcFS, contents[i], dstFS, new Path(dst, contents[i].getName()),
-               deleteSource, conf);
-        }
-      } else if (srcFS.isFile(src)) {
-        InputStream in = srcFS.open(src);
-        try {
-          OutputStream out = dstFS.create(dst);
-          copyContent(in, out, conf);
-        } finally {
-          in.close();
-        }
-      } else {
-        throw new IOException(src.toString() + ": No such file or directory");
+    if (srcFS.isDirectory(src)) {
+      if (!dstFS.mkdirs(dst)) {
+        return false;
       }
-      if (deleteSource) {
-        return srcFS.delete(src);
-      } else {
-        return true;
+      Path contents[] = srcFS.listPaths(src);
+      for (int i = 0; i < contents.length; i++) {
+        copy(srcFS, contents[i], dstFS, new Path(dst, contents[i].getName()),
+             deleteSource, conf);
+      }
+    } else if (srcFS.isFile(src)) {
+      InputStream in = srcFS.open(src);
+      try {
+        OutputStream out = dstFS.create(dst);
+        copyContent(in, out, conf);
+      } finally {
+        in.close();
       }
+    } else {
+      throw new IOException(src.toString() + ": No such file or directory");
+    }
+    if (deleteSource) {
+      return srcFS.delete(src);
+    } else {
+      return true;
+    }
   
   }
 
   /** Copy all files in a directory to one output file (merge). */
   public static boolean copyMerge(FileSystem srcFS, Path srcDir, 
-                             FileSystem dstFS, Path dstFile, 
-                             boolean deleteSource,
-                             Configuration conf, String addString) throws IOException {
-      dstFile = checkDest(srcDir.getName(), dstFS, dstFile);
+                                  FileSystem dstFS, Path dstFile, 
+                                  boolean deleteSource,
+                                  Configuration conf, String addString) throws IOException {
+    dstFile = checkDest(srcDir.getName(), dstFS, dstFile);
 
     if (!srcFS.isDirectory(srcDir))
       return false;
@@ -200,7 +200,7 @@ public class FileUtil {
   }
 
   private static void copyContent(InputStream in, OutputStream out,
-          Configuration conf) throws IOException {
+                                  Configuration conf) throws IOException {
     copyContent(in, out, conf, true);
   }
 
@@ -274,7 +274,7 @@ public class FileUtil {
     }
   }
     
-	/**
+  /**
    * Given a File input it will unzip the file in a the unzip directory
    * passed as the second parameter
    * @param inFile The zip file as input
@@ -363,10 +363,10 @@ public class FileUtil {
       try {
         if (process.waitFor() != 0) {
           String errMsg = new BufferedReader(new InputStreamReader(
-              process.getInputStream())).readLine();
+                                                                   process.getInputStream())).readLine();
           if( errMsg == null )  errMsg = "";
           String inpMsg = new BufferedReader(new InputStreamReader(
-              process.getErrorStream())).readLine();
+                                                                   process.getErrorStream())).readLine();
           if( inpMsg == null )  inpMsg = "";
           throw new IOException( errMsg + inpMsg );
         }
@@ -386,16 +386,16 @@ public class FileUtil {
    * @return value returned by the command
    */
   public static int symLink(String target, String linkname) throws IOException{
-   String cmd = "ln -s " + target + " " + linkname;
-   Process p = Runtime.getRuntime().exec( cmd, null );
-   int returnVal = -1;
-   try{
-     returnVal = p.waitFor();
-   } catch(InterruptedException e){
-     //do nothing as of yet
-   }
-   return returnVal;
- }
+    String cmd = "ln -s " + target + " " + linkname;
+    Process p = Runtime.getRuntime().exec( cmd, null );
+    int returnVal = -1;
+    try{
+      returnVal = p.waitFor();
+    } catch(InterruptedException e){
+      //do nothing as of yet
+    }
+    return returnVal;
+  }
   
   /**
    * Change the permissions on a filename.
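
A hypothetical use of the symLink() helper re-indented above: it shells out to "ln -s" and hands back the process exit code, so callers are expected to treat a non-zero return as failure. The paths below are illustrative only.

    import org.apache.hadoop.fs.FileUtil;

    public class SymLinkExample {
      public static void main(String[] args) throws Exception {
        // Illustrative paths; symLink() simply runs "ln -s <target> <linkname>".
        int rc = FileUtil.symLink("/tmp/target-file", "/tmp/link-to-target");
        if (rc != 0) {
          System.err.println("ln -s failed with exit code " + rc);
        } else {
          System.out.println("created /tmp/link-to-target");
        }
      }
    }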

+ 9 - 9
src/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -92,7 +92,7 @@ public class FilterFileSystem extends FileSystem {
    * The FileSystem will simply return an elt containing 'localhost'.
    */
   public String[][] getFileCacheHints(Path f, long start, long len)
-  throws IOException {
+    throws IOException {
     return fs.getFileCacheHints(f, start, len);
   }
 
@@ -120,7 +120,7 @@ public class FilterFileSystem extends FileSystem {
                                     short replication,
                                     long blockSize,
                                     Progressable progress
-                                   ) throws IOException {
+                                    ) throws IOException {
     return fs.create(f, overwrite, bufferSize, replication, blockSize, progress);
   }
 
@@ -217,7 +217,7 @@ public class FilterFileSystem extends FileSystem {
    * @deprecated FS does not support file locks anymore.
    */
   @Deprecated
-  public void lock(Path f, boolean shared) throws IOException {
+    public void lock(Path f, boolean shared) throws IOException {
     fs.lock(f, shared);
   }
 
@@ -227,7 +227,7 @@ public class FilterFileSystem extends FileSystem {
    * @deprecated FS does not support file locks anymore.     
    */
   @Deprecated
-  public void release(Path f) throws IOException {
+    public void release(Path f) throws IOException {
     fs.release(f);
   }
 
@@ -237,7 +237,7 @@ public class FilterFileSystem extends FileSystem {
    * delSrc indicates if the source should be removed
    */
   public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
-  throws IOException {
+    throws IOException {
     fs.copyFromLocalFile(delSrc, src, dst);
   }
   
@@ -247,7 +247,7 @@ public class FilterFileSystem extends FileSystem {
    * delSrc indicates if the src will be removed or not.
    */   
   public void copyToLocalFile(boolean delSrc, Path src, Path dst)
-  throws IOException {
+    throws IOException {
     fs.copyToLocalFile(delSrc, src, dst);
   }
   
@@ -258,7 +258,7 @@ public class FilterFileSystem extends FileSystem {
    * the FS is remote, we write into the tmp local area.
    */
   public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-  throws IOException {
+    throws IOException {
     return fs.startLocalOutput(fsOutputFile, tmpLocalFile);
   }
 
@@ -269,7 +269,7 @@ public class FilterFileSystem extends FileSystem {
    * fsOutputFile.
    */
   public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-  throws IOException {
+    throws IOException {
     fs.completeLocalOutput(fsOutputFile, tmpLocalFile);
   }
 
@@ -296,7 +296,7 @@ public class FilterFileSystem extends FileSystem {
   }
 
   @Override
-  public Configuration getConf() {
+    public Configuration getConf() {
     return fs.getConf();
   }
 }
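
FilterFileSystem above is a pure delegator: it holds the wrapped FileSystem in the field fs and forwards every call, so subclasses such as ChecksumFileSystem override only what they need. A tiny self-contained illustration of that pattern (plain Java, no Hadoop types, names invented for the example):

    interface Store {
      String get(String key);
    }

    class MapStore implements Store {
      private final java.util.Map<String, String> map = new java.util.HashMap<String, String>();
      MapStore put(String k, String v) { map.put(k, v); return this; }
      public String get(String key) { return map.get(key); }
    }

    class FilterStore implements Store {
      protected final Store store;                         // the wrapped instance, like "fs" above
      FilterStore(Store store) { this.store = store; }
      public String get(String key) { return store.get(key); }   // pure delegation
    }

    class LoggingStore extends FilterStore {
      LoggingStore(Store store) { super(store); }
      public String get(String key) {                      // override just one method
        System.out.println("get(" + key + ")");
        return super.get(key);
      }
    }

    public class FilterPatternExample {
      public static void main(String[] args) {
        Store s = new LoggingStore(new MapStore().put("a", "1"));
        System.out.println(s.get("a"));                    // logs the call, then prints "1"
      }
    }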

+ 937 - 937
src/java/org/apache/hadoop/fs/FsShell.java

@@ -28,1068 +28,1068 @@ import org.apache.hadoop.util.ToolBase;
 /** Provide command line access to a FileSystem. */
 public class FsShell extends ToolBase {
 
-    protected FileSystem fs;
-    private Trash trash;
+  protected FileSystem fs;
+  private Trash trash;
 
-    /**
-     */
-    public FsShell() {
-    }
+  /**
+   */
+  public FsShell() {
+  }
 
-    public void init() throws IOException {
-        conf.setQuietMode(true);
-        this.fs = FileSystem.get(conf);
-        this.trash = new Trash(conf);
-    }
+  public void init() throws IOException {
+    conf.setQuietMode(true);
+    this.fs = FileSystem.get(conf);
+    this.trash = new Trash(conf);
+  }
 
-    /**
-     * Copies from one stream to another.
-     */
-    private void copyBytes(InputStream in, OutputStream out) throws IOException {
-      PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null;
-      byte buf[] = new byte[conf.getInt("io.file.buffer.size", 4096)];
-      int bytesRead = in.read(buf);
-      while (bytesRead >= 0) {
-        out.write(buf, 0, bytesRead);
-        if ((ps != null) && ps.checkError()) {
-          throw new IOException("Unable to write to output stream.");
-        }
-        bytesRead = in.read(buf);
+  /**
+   * Copies from one stream to another.
+   */
+  private void copyBytes(InputStream in, OutputStream out) throws IOException {
+    PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null;
+    byte buf[] = new byte[conf.getInt("io.file.buffer.size", 4096)];
+    int bytesRead = in.read(buf);
+    while (bytesRead >= 0) {
+      out.write(buf, 0, bytesRead);
+      if ((ps != null) && ps.checkError()) {
+        throw new IOException("Unable to write to output stream.");
       }
+      bytesRead = in.read(buf);
     }
+  }
       
-    /**
-     * Copies from stdin to the indicated file.
-     */
-    private void copyFromStdin(Path dst) throws IOException {
-      if (fs.isDirectory(dst)) {
-        throw new IOException("When source is stdin, destination must be a file.");
-      }
-      if (fs.exists(dst)) {
-        throw new IOException("Target " + dst.toString() + " already exists.");
-      }
-      FSDataOutputStream out = fs.create(dst); 
-      try {
-        copyBytes(System.in, out);
-      } finally {
-        out.close();
-      }
+  /**
+   * Copies from stdin to the indicated file.
+   */
+  private void copyFromStdin(Path dst) throws IOException {
+    if (fs.isDirectory(dst)) {
+      throw new IOException("When source is stdin, destination must be a file.");
     }
-
-    /** 
-     * Print from src to stdout.
-     */
-    private void printToStdout(Path src) throws IOException {
-      if (fs.isDirectory(src)) {
-        throw new IOException("Source must be a file.");
-      }
-      FSDataInputStream in = fs.open(src);
-      try {
-        copyBytes(in, System.out);
-      } finally {
-        in.close();
-      }
-
+    if (fs.exists(dst)) {
+      throw new IOException("Target " + dst.toString() + " already exists.");
+    }
+    FSDataOutputStream out = fs.create(dst); 
+    try {
+      copyBytes(System.in, out);
+    } finally {
+      out.close();
     }
+  }
 
-    /**
-     * Add a local file to the indicated FileSystem name. src is kept.
-     */
-    void copyFromLocal(Path src, String dstf) throws IOException {
-      if (src.toString().equals("-")) {
-        copyFromStdin(new Path(dstf));
-      } else {
-        fs.copyFromLocalFile(src, new Path(dstf));
-      }
+  /** 
+   * Print from src to stdout.
+   */
+  private void printToStdout(Path src) throws IOException {
+    if (fs.isDirectory(src)) {
+      throw new IOException("Source must be a file.");
+    }
+    FSDataInputStream in = fs.open(src);
+    try {
+      copyBytes(in, System.out);
+    } finally {
+      in.close();
     }
 
-    /**
-     * Add a local file to the indicated FileSystem name. src is removed.
-     */
-    void moveFromLocal(Path src, String dstf) throws IOException {
-        fs.moveFromLocalFile(src, new Path(dstf));
+  }
+
+  /**
+   * Add a local file to the indicated FileSystem name. src is kept.
+   */
+  void copyFromLocal(Path src, String dstf) throws IOException {
+    if (src.toString().equals("-")) {
+      copyFromStdin(new Path(dstf));
+    } else {
+      fs.copyFromLocalFile(src, new Path(dstf));
     }
+  }
+
+  /**
+   * Add a local file to the indicated FileSystem name. src is removed.
+   */
+  void moveFromLocal(Path src, String dstf) throws IOException {
+    fs.moveFromLocalFile(src, new Path(dstf));
+  }
 
-    /**
-     * Obtain the indicated files that match the file pattern <i>srcf</i>
-     * and copy them to the local name. srcf is kept.
-     * When copying mutiple files, the destination must be a directory. 
-     * Otherwise, IOException is thrown.
-     * @param argv: arguments
-     * @param pos: Ignore everything before argv[pos]  
-     * @exception: IOException  
-     * @see org.apache.hadoop.fs.FileSystem.globPaths 
-     */
-    void copyToLocal(String[]argv, int pos) throws IOException {
-      if(argv.length-pos<2 || (argv.length-pos==2 && argv[pos].equalsIgnoreCase("-crc"))) {
-        System.err.println("Usage: -get [-crc] <src> <dst>");
-        System.exit(-1);
+  /**
+   * Obtain the indicated files that match the file pattern <i>srcf</i>
+   * and copy them to the local name. srcf is kept.
+   * When copying mutiple files, the destination must be a directory. 
+   * Otherwise, IOException is thrown.
+   * @param argv: arguments
+   * @param pos: Ignore everything before argv[pos]  
+   * @exception: IOException  
+   * @see org.apache.hadoop.fs.FileSystem.globPaths 
+   */
+  void copyToLocal(String[]argv, int pos) throws IOException {
+    if(argv.length-pos<2 || (argv.length-pos==2 && argv[pos].equalsIgnoreCase("-crc"))) {
+      System.err.println("Usage: -get [-crc] <src> <dst>");
+      System.exit(-1);
+    }
+    boolean copyCrc = false;
+    if ("-crc".equalsIgnoreCase(argv[pos])) {
+      pos++;
+      copyCrc = true;
+    }
+    String srcf = argv[pos++];
+    String dstf = argv[pos++];
+    if( dstf.equals("-")) {
+      if (copyCrc) {
+        System.err.println("-crc option is not valid when destination is stdout.");
       }
-      boolean copyCrc = false;
-      if ("-crc".equalsIgnoreCase(argv[pos])) {
-        pos++;
-        copyCrc = true;
+      cat(srcf);
+    } else {
+      Path [] srcs = fs.globPaths( new Path(srcf) );
+      if( srcs.length > 1 && !new File( dstf ).isDirectory()) {
+        throw new IOException( "When copying multiple files, " 
+                               + "destination should be a directory." );
       }
-      String srcf = argv[pos++];
-      String dstf = argv[pos++];
-      if( dstf.equals("-")) {
-        if (copyCrc) {
-          System.err.println("-crc option is not valid when destination is stdout.");
-        }
-        cat(srcf);
-      } else {
-        Path [] srcs = fs.globPaths( new Path(srcf) );
-        if( srcs.length > 1 && !new File( dstf ).isDirectory()) {
-          throw new IOException( "When copying multiple files, " 
-                                 + "destination should be a directory." );
-        }
-        Path dst = new Path( dstf );
-        for( int i=0; i<srcs.length; i++ ) {
-          ((DistributedFileSystem)fs).copyToLocalFile(srcs[i], dst, copyCrc);
-        }
+      Path dst = new Path( dstf );
+      for( int i=0; i<srcs.length; i++ ) {
+        ((DistributedFileSystem)fs).copyToLocalFile(srcs[i], dst, copyCrc);
       }
     }
+  }
     
-    /**
-     * Get all the files in the directories that match the source file 
-     * pattern and merge and sort them to only one file on local fs 
-     * srcf is kept.
-     * @param srcf: a file pattern specifying source files
-     * @param dstf: a destination local file/directory 
-     * @exception: IOException  
-     * @see org.apache.hadoop.fs.FileSystem.globPaths 
-     */
-    void copyMergeToLocal(String srcf, Path dst) throws IOException {
-        copyMergeToLocal(srcf, dst, false);
-    }    
+  /**
+   * Get all the files in the directories that match the source file 
+   * pattern and merge and sort them to only one file on local fs 
+   * srcf is kept.
+   * @param srcf: a file pattern specifying source files
+   * @param dstf: a destination local file/directory 
+   * @exception: IOException  
+   * @see org.apache.hadoop.fs.FileSystem.globPaths 
+   */
+  void copyMergeToLocal(String srcf, Path dst) throws IOException {
+    copyMergeToLocal(srcf, dst, false);
+  }    
     
 
-    /**
-     * Get all the files in the directories that match the source file pattern
-     * and merge and sort them to only one file on local fs 
-     * srcf is kept.
-     * 
-     * Also adds a string between the files (useful for adding \n
-     * to a text file)
-     * @param srcf: a file pattern specifying source files
-     * @param dstf: a destination local file/directory
-     * @param endline: if an end of line character is added to a text file 
-     * @exception: IOException  
-     * @see org.apache.hadoop.fs.FileSystem.globPaths 
-     */
-    void copyMergeToLocal(String srcf, Path dst, boolean endline) throws IOException {
-      Path [] srcs = fs.globPaths( new Path( srcf ) );
-      for( int i=0; i<srcs.length; i++ ) {
-        if(endline) {
-            FileUtil.copyMerge(fs, srcs[i], 
-                    FileSystem.getLocal(conf), dst, false, conf, "\n");
-        } else {
-            FileUtil.copyMerge(fs, srcs[i], 
-                    FileSystem.getLocal(conf), dst, false, conf, null);
-        }
+  /**
+   * Get all the files in the directories that match the source file pattern
+   * and merge and sort them to only one file on local fs 
+   * srcf is kept.
+   * 
+   * Also adds a string between the files (useful for adding \n
+   * to a text file)
+   * @param srcf: a file pattern specifying source files
+   * @param dstf: a destination local file/directory
+   * @param endline: if an end of line character is added to a text file 
+   * @exception: IOException  
+   * @see org.apache.hadoop.fs.FileSystem.globPaths 
+   */
+  void copyMergeToLocal(String srcf, Path dst, boolean endline) throws IOException {
+    Path [] srcs = fs.globPaths( new Path( srcf ) );
+    for( int i=0; i<srcs.length; i++ ) {
+      if(endline) {
+        FileUtil.copyMerge(fs, srcs[i], 
+                           FileSystem.getLocal(conf), dst, false, conf, "\n");
+      } else {
+        FileUtil.copyMerge(fs, srcs[i], 
+                           FileSystem.getLocal(conf), dst, false, conf, null);
       }
-    }      
-
-    /**
-     * Obtain the indicated file and copy to the local name.
-     * srcf is removed.
-     */
-    void moveToLocal(String srcf, Path dst) throws IOException {
-        System.err.println("Option '-moveToLocal' is not implemented yet.");
     }
+  }      
 
-    /**
-     * Fetch all files that match the file pattern <i>srcf</i> and display
-     * their content on stdout. 
-     * @param srcf: a file pattern specifying source files
-     * @exception: IOException
-     * @see org.apache.hadoop.fs.FileSystem.globPaths 
-     */
-    void cat(String srcf) throws IOException {
-      Path [] srcs = fs.globPaths( new Path( srcf ) );
-      for( int i=0; i<srcs.length; i++ ) {
-        printToStdout(srcs[i]);
-      }
+  /**
+   * Obtain the indicated file and copy to the local name.
+   * srcf is removed.
+   */
+  void moveToLocal(String srcf, Path dst) throws IOException {
+    System.err.println("Option '-moveToLocal' is not implemented yet.");
+  }
+
+  /**
+   * Fetch all files that match the file pattern <i>srcf</i> and display
+   * their content on stdout. 
+   * @param srcf: a file pattern specifying source files
+   * @exception: IOException
+   * @see org.apache.hadoop.fs.FileSystem.globPaths 
+   */
+  void cat(String srcf) throws IOException {
+    Path [] srcs = fs.globPaths( new Path( srcf ) );
+    for( int i=0; i<srcs.length; i++ ) {
+      printToStdout(srcs[i]);
     }
+  }
     
-    /**
-     * Parse the incoming command string
-     * @param cmd
-     * @param pos ignore anything before this pos in cmd
-     * @throws IOException 
-     */
-    private void setReplication(String[] cmd, int pos) throws IOException {
-      if(cmd.length-pos<2 || (cmd.length-pos==2 && cmd[pos].equalsIgnoreCase("-R"))) {
-        System.err.println("Usage: [-R] <repvalue> <path>");
-        System.exit(-1);
-      }
+  /**
+   * Parse the incoming command string
+   * @param cmd
+   * @param pos ignore anything before this pos in cmd
+   * @throws IOException 
+   */
+  private void setReplication(String[] cmd, int pos) throws IOException {
+    if(cmd.length-pos<2 || (cmd.length-pos==2 && cmd[pos].equalsIgnoreCase("-R"))) {
+      System.err.println("Usage: [-R] <repvalue> <path>");
+      System.exit(-1);
+    }
       
-      boolean recursive = false;
-      short rep = 3;
+    boolean recursive = false;
+    short rep = 3;
       
-      if("-R".equalsIgnoreCase(cmd[pos])) {
-        recursive=true;
-        pos++;
+    if("-R".equalsIgnoreCase(cmd[pos])) {
+      recursive=true;
+      pos++;
         
-      }
-      
-      try {
-        rep = Short.parseShort(cmd[pos]);
-        pos++;
-      } catch (NumberFormatException e) {
-        System.err.println("Cannot set replication to: " + cmd[pos]);
-        System.exit(-1);
-      }
+    }
       
-      setReplication(rep, cmd[pos], recursive);
+    try {
+      rep = Short.parseShort(cmd[pos]);
+      pos++;
+    } catch (NumberFormatException e) {
+      System.err.println("Cannot set replication to: " + cmd[pos]);
+      System.exit(-1);
     }
+      
+    setReplication(rep, cmd[pos], recursive);
+  }
     
-    /**
-     * Set the replication for files that match file pattern <i>srcf</i>
-     * if it's a directory and recursive is true,
-     * set replication for all the subdirs and those files too.
-     * @param newRep new replication factor
-     * @param srcf a file pattern specifying source files
-     * @param recursive if need to set replication factor for files in subdirs
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void setReplication(short newRep, String srcf, boolean recursive)
-        throws IOException {
-      Path[] srcs = fs.globPaths( new Path(srcf) );
-      for( int i=0; i<srcs.length; i++ ) {
-        setReplication( newRep, srcs[i], recursive );
-      }
+  /**
+   * Set the replication for files that match file pattern <i>srcf</i>
+   * if it's a directory and recursive is true,
+   * set replication for all the subdirs and those files too.
+   * @param newRep new replication factor
+   * @param srcf a file pattern specifying source files
+   * @param recursive if need to set replication factor for files in subdirs
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void setReplication(short newRep, String srcf, boolean recursive)
+    throws IOException {
+    Path[] srcs = fs.globPaths( new Path(srcf) );
+    for( int i=0; i<srcs.length; i++ ) {
+      setReplication( newRep, srcs[i], recursive );
     }
+  }
     
-    private void setReplication(short newRep, Path src, boolean recursive)
-      throws IOException {
+  private void setReplication(short newRep, Path src, boolean recursive)
+    throws IOException {
   	
-    	if(!fs.isDirectory(src)) {
-    		setFileReplication(src, newRep);
-    		return;
-    	}
+    if(!fs.isDirectory(src)) {
+      setFileReplication(src, newRep);
+      return;
+    }
     	
-      Path items[] = fs.listPaths(src);
-      if (items == null) {
-      	throw new IOException("Could not get listing for " + src);
-      } else {
+    Path items[] = fs.listPaths(src);
+    if (items == null) {
+      throw new IOException("Could not get listing for " + src);
+    } else {
 
-      	for (int i = 0; i < items.length; i++) {
-      		Path cur = items[i];
-       		if(!fs.isDirectory(cur)) {
-       			setFileReplication(cur, newRep);
-       		} else if(recursive) {
-       			setReplication(newRep, cur, recursive);
-       		}
-       	}
-       }
+      for (int i = 0; i < items.length; i++) {
+        Path cur = items[i];
+        if(!fs.isDirectory(cur)) {
+          setFileReplication(cur, newRep);
+        } else if(recursive) {
+          setReplication(newRep, cur, recursive);
+        }
+      }
     }
+  }
     
-    /**
-     * Actually set the replication for this file
-     * If it fails either throw IOException or print an error msg
-     * @param file: a file/directory
-     * @param newRep: new replication factor
-     * @throws IOException
-     */
-    private void setFileReplication(Path file, short newRep) throws IOException {
+  /**
+   * Actually set the replication for this file
+   * If it fails either throw IOException or print an error msg
+   * @param file: a file/directory
+   * @param newRep: new replication factor
+   * @throws IOException
+   */
+  private void setFileReplication(Path file, short newRep) throws IOException {
     	
-    	if(fs.setReplication(file, newRep)) {
-    		System.out.println("Replication " + newRep + " set: " + file);
-    	} else {
-    		System.err.println("Could not set replication for: " + file);
-    	}
+    if(fs.setReplication(file, newRep)) {
+      System.out.println("Replication " + newRep + " set: " + file);
+    } else {
+      System.err.println("Could not set replication for: " + file);
     }
+  }
     
     
-    /**
-     * Get a listing of all files in that match the file pattern <i>srcf</i>.
-     * @param srcf a file pattern specifying source files
-     * @param recursive if need to list files in subdirs
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void ls(String srcf, boolean recursive) throws IOException {
-      Path[] srcs = fs.globPaths( new Path(srcf) );
-      boolean printHeader = (srcs.length == 1) ? true: false;
-      for(int i=0; i<srcs.length; i++) {
-        ls(srcs[i], recursive, printHeader);
-      }
+  /**
+   * Get a listing of all files in that match the file pattern <i>srcf</i>.
+   * @param srcf a file pattern specifying source files
+   * @param recursive if need to list files in subdirs
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void ls(String srcf, boolean recursive) throws IOException {
+    Path[] srcs = fs.globPaths( new Path(srcf) );
+    boolean printHeader = (srcs.length == 1) ? true: false;
+    for(int i=0; i<srcs.length; i++) {
+      ls(srcs[i], recursive, printHeader);
     }
+  }
 
-    /* list all files under the directory <i>src</i>*/
-    private void ls(Path src, boolean recursive, boolean printHeader ) throws IOException {
-        Path items[] = fs.listPaths(src);
-        if (items == null) {
-            throw new IOException("Could not get listing for " + src);
-        } else {
-            if(!recursive && printHeader ) {
-            	System.out.println("Found " + items.length + " items");
-            }
-            for (int i = 0; i < items.length; i++) {
-                Path cur = items[i];
-                System.out.println(cur + "\t" 
-                                    + (fs.isDirectory(cur) ? 
-                                        "<dir>" : 
-                                        ("<r " + fs.getReplication(cur) 
-                                            + ">\t" + fs.getLength(cur))));
-                if(recursive && fs.isDirectory(cur)) {
-                  ls(cur, recursive, printHeader);
-                }
-            }
+  /* list all files under the directory <i>src</i>*/
+  private void ls(Path src, boolean recursive, boolean printHeader ) throws IOException {
+    Path items[] = fs.listPaths(src);
+    if (items == null) {
+      throw new IOException("Could not get listing for " + src);
+    } else {
+      if(!recursive && printHeader ) {
+        System.out.println("Found " + items.length + " items");
+      }
+      for (int i = 0; i < items.length; i++) {
+        Path cur = items[i];
+        System.out.println(cur + "\t" 
+                           + (fs.isDirectory(cur) ? 
+                              "<dir>" : 
+                              ("<r " + fs.getReplication(cur) 
+                               + ">\t" + fs.getLength(cur))));
+        if(recursive && fs.isDirectory(cur)) {
+          ls(cur, recursive, printHeader);
         }
+      }
     }
+  }
 
-    /**
-     * Show the size of all files that match the file pattern <i>src</i>
-     * @param src a file pattern specifying source files
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void du(String src) throws IOException {
-        Path items[] = fs.listPaths( fs.globPaths( new Path(src) ) );
-        if (items == null) {
-            throw new IOException("Could not get listing for " + src);
-        } else {
-            System.out.println("Found " + items.length + " items");
-            for (int i = 0; i < items.length; i++) {
-              Path cur = items[i];
-              System.out.println(cur + "\t" + fs.getContentLength(cur));
-            }
-        }
+  /**
+   * Show the size of all files that match the file pattern <i>src</i>
+   * @param src a file pattern specifying source files
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void du(String src) throws IOException {
+    Path items[] = fs.listPaths( fs.globPaths( new Path(src) ) );
+    if (items == null) {
+      throw new IOException("Could not get listing for " + src);
+    } else {
+      System.out.println("Found " + items.length + " items");
+      for (int i = 0; i < items.length; i++) {
+        Path cur = items[i];
+        System.out.println(cur + "\t" + fs.getContentLength(cur));
+      }
     }
+  }
     
-    /**
-     * Show the summary disk usage of each dir/file 
-     * that matches the file pattern <i>src</i>
-     * @param src a file pattern specifying source files
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void dus(String src) throws IOException {
-      Path paths[] = fs.globPaths( new Path(src) );
-      if( paths==null && paths.length==0 ) {
-        throw new IOException( "dus: No match: " + src );
-      }
-      for(int i=0; i<paths.length; i++) {
-        Path items[] = fs.listPaths( paths[i] );
-        if (items != null) {
-          long totalSize=0;
-          for(int j=0; j<items.length; j++) {
-            totalSize += fs.getContentLength(items[j]);
-          }
-          String pathStr = paths[i].toString();
-          System.out.println(
-              ("".equals(pathStr)?".":pathStr) + "\t" + totalSize);
+  /**
+   * Show the summary disk usage of each dir/file 
+   * that matches the file pattern <i>src</i>
+   * @param src a file pattern specifying source files
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void dus(String src) throws IOException {
+    Path paths[] = fs.globPaths( new Path(src) );
+    if( paths==null && paths.length==0 ) {
+      throw new IOException( "dus: No match: " + src );
+    }
+    for(int i=0; i<paths.length; i++) {
+      Path items[] = fs.listPaths( paths[i] );
+      if (items != null) {
+        long totalSize=0;
+        for(int j=0; j<items.length; j++) {
+          totalSize += fs.getContentLength(items[j]);
         }
+        String pathStr = paths[i].toString();
+        System.out.println(
+                           ("".equals(pathStr)?".":pathStr) + "\t" + totalSize);
       }
     }
+  }
 
-    /**
-     * Create the given dir
-     */
-    public void mkdir(String src) throws IOException {
-        Path f = new Path(src);
-        if (!fs.mkdirs(f)) {
-          throw new IOException("Mkdirs failed to create " + src);
-        }
+  /**
+   * Create the given dir
+   */
+  public void mkdir(String src) throws IOException {
+    Path f = new Path(src);
+    if (!fs.mkdirs(f)) {
+      throw new IOException("Mkdirs failed to create " + src);
     }
+  }
     
-    /**
-     * Move files that match the file pattern <i>srcf</i>
-     * to a destination file.
-     * When moving mutiple files, the destination must be a directory. 
-     * Otherwise, IOException is thrown.
-     * @param srcf a file pattern specifying source files
-     * @param dstf a destination local file/directory 
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void rename(String srcf, String dstf) throws IOException {
-      Path [] srcs = fs.globPaths( new Path(srcf) );
-      Path dst = new Path(dstf);
-      if( srcs.length > 1 && !fs.isDirectory(dst)) {
-        throw new IOException( "When moving multiple files, " 
-            + "destination should be a directory." );
-      }
-      for( int i=0; i<srcs.length; i++ ) {
-        if (fs.rename(srcs[i], dst)) {
-            System.out.println("Renamed " + srcs[i] + " to " + dstf);
-        } else {
-            throw new IOException("Rename failed " + srcs[i]);
-        }
+  /**
+   * Move files that match the file pattern <i>srcf</i>
+   * to a destination file.
+   * When moving mutiple files, the destination must be a directory. 
+   * Otherwise, IOException is thrown.
+   * @param srcf a file pattern specifying source files
+   * @param dstf a destination local file/directory 
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void rename(String srcf, String dstf) throws IOException {
+    Path [] srcs = fs.globPaths( new Path(srcf) );
+    Path dst = new Path(dstf);
+    if( srcs.length > 1 && !fs.isDirectory(dst)) {
+      throw new IOException( "When moving multiple files, " 
+                             + "destination should be a directory." );
+    }
+    for( int i=0; i<srcs.length; i++ ) {
+      if (fs.rename(srcs[i], dst)) {
+        System.out.println("Renamed " + srcs[i] + " to " + dstf);
+      } else {
+        throw new IOException("Rename failed " + srcs[i]);
       }
     }
+  }
 
-    /**
-     * Move/rename file(s) to a destination file. Multiple source
-     * files can be specified. The destination is the last element of
-     * the argvp[] array.
-     * If multiple source files are specified, then the destination 
-     * must be a directory. Otherwise, IOException is thrown.
-     * @exception: IOException  
-     */
-    private int rename(String argv[], Configuration conf) throws IOException {
-      int i = 0;
-      int exitCode = 0;
-      String cmd = argv[i++];  
-      String dest = argv[argv.length-1];
-      //
-      // If the user has specified multiple source files, then
-      // the destination has to be a directory
-      //
-      if (argv.length > 3) {
-        Path dst = new Path(dest);
-        if (!fs.isDirectory(dst)) {
-          throw new IOException( "When moving multiple files, " 
-            + "destination " + dest + " should be a directory." );
-        }
+  /**
+   * Move/rename file(s) to a destination file. Multiple source
+   * files can be specified. The destination is the last element of
+   * the argvp[] array.
+   * If multiple source files are specified, then the destination 
+   * must be a directory. Otherwise, IOException is thrown.
+   * @exception: IOException  
+   */
+  private int rename(String argv[], Configuration conf) throws IOException {
+    int i = 0;
+    int exitCode = 0;
+    String cmd = argv[i++];  
+    String dest = argv[argv.length-1];
+    //
+    // If the user has specified multiple source files, then
+    // the destination has to be a directory
+    //
+    if (argv.length > 3) {
+      Path dst = new Path(dest);
+      if (!fs.isDirectory(dst)) {
+        throw new IOException( "When moving multiple files, " 
+                               + "destination " + dest + " should be a directory." );
       }
-      //
-      // for each source file, issue the rename
-      //
-      for (; i < argv.length - 1; i++) {
+    }
+    //
+    // for each source file, issue the rename
+    //
+    for (; i < argv.length - 1; i++) {
+      try {
+        //
+        // issue the rename to the fs
+        //
+        rename(argv[i], dest);
+      } catch (RemoteException e) {
+        //
+        // This is a error returned by hadoop server. Print
+        // out the first line of the error mesage.
+        //
+        exitCode = -1;
         try {
-          //
-          // issue the rename to the fs
-          //
-          rename(argv[i], dest);
-        } catch (RemoteException e) {
-          //
-          // This is a error returned by hadoop server. Print
-          // out the first line of the error mesage.
-          //
-          exitCode = -1;
-          try {
-            String[] content;
-            content = e.getLocalizedMessage().split("\n");
-            System.err.println(cmd.substring(1) + ": " +
-                               content[0]);
-          } catch (Exception ex) {
-            System.err.println(cmd.substring(1) + ": " +
-                               ex.getLocalizedMessage());
-          }
-        } catch (IOException e) {
-          //
-          // IO exception encountered locally.
-          //
-          exitCode = -1;
+          String[] content;
+          content = e.getLocalizedMessage().split("\n");
+          System.err.println(cmd.substring(1) + ": " +
+                             content[0]);
+        } catch (Exception ex) {
           System.err.println(cmd.substring(1) + ": " +
-                             e.getLocalizedMessage());
+                             ex.getLocalizedMessage());
         }
+      } catch (IOException e) {
+        //
+        // IO exception encountered locally.
+        //
+        exitCode = -1;
+        System.err.println(cmd.substring(1) + ": " +
+                           e.getLocalizedMessage());
       }
-      return exitCode;
     }
+    return exitCode;
+  }
 
-    /**
-     * Copy files that match the file pattern <i>srcf</i>
-     * to a destination file.
-     * When copying mutiple files, the destination must be a directory. 
-     * Otherwise, IOException is thrown.
-     * @param srcf a file pattern specifying source files
-     * @param dstf a destination local file/directory 
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void copy(String srcf, String dstf, Configuration conf) throws IOException {
-      Path [] srcs = fs.globPaths( new Path(srcf) );
-      Path dst = new Path(dstf);
-      if( srcs.length > 1 && !fs.isDirectory(dst)) {
-        throw new IOException( "When copying multiple files, " 
-            + "destination should be a directory." );
-      }
-      for( int i=0; i<srcs.length; i++ ) {
-        FileUtil.copy(fs, srcs[i], fs, dst, false, conf);
-      }
+  /**
+   * Copy files that match the file pattern <i>srcf</i>
+   * to a destination file.
+   * When copying mutiple files, the destination must be a directory. 
+   * Otherwise, IOException is thrown.
+   * @param srcf a file pattern specifying source files
+   * @param dstf a destination local file/directory 
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void copy(String srcf, String dstf, Configuration conf) throws IOException {
+    Path [] srcs = fs.globPaths( new Path(srcf) );
+    Path dst = new Path(dstf);
+    if( srcs.length > 1 && !fs.isDirectory(dst)) {
+      throw new IOException( "When copying multiple files, " 
+                             + "destination should be a directory." );
+    }
+    for( int i=0; i<srcs.length; i++ ) {
+      FileUtil.copy(fs, srcs[i], fs, dst, false, conf);
     }
+  }
 
-    /**
-     * Copy file(s) to a destination file. Multiple source
-     * files can be specified. The destination is the last element of
-     * the argvp[] array.
-     * If multiple source files are specified, then the destination 
-     * must be a directory. Otherwise, IOException is thrown.
-     * @exception: IOException  
-     */
-    private int copy(String argv[], Configuration conf) throws IOException {
-      int i = 0;
-      int exitCode = 0;
-      String cmd = argv[i++];  
-      String dest = argv[argv.length-1];
-      //
-      // If the user has specified multiple source files, then
-      // the destination has to be a directory
-      //
-      if (argv.length > 3) {
-        Path dst = new Path(dest);
-        if (!fs.isDirectory(dst)) {
-          throw new IOException( "When copying multiple files, " 
-            + "destination " + dest + " should be a directory." );
-        }
+  /**
+   * Copy file(s) to a destination file. Multiple source
+   * files can be specified. The destination is the last element of
+   * the argvp[] array.
+   * If multiple source files are specified, then the destination 
+   * must be a directory. Otherwise, IOException is thrown.
+   * @exception: IOException  
+   */
+  private int copy(String argv[], Configuration conf) throws IOException {
+    int i = 0;
+    int exitCode = 0;
+    String cmd = argv[i++];  
+    String dest = argv[argv.length-1];
+    //
+    // If the user has specified multiple source files, then
+    // the destination has to be a directory
+    //
+    if (argv.length > 3) {
+      Path dst = new Path(dest);
+      if (!fs.isDirectory(dst)) {
+        throw new IOException( "When copying multiple files, " 
+                               + "destination " + dest + " should be a directory." );
       }
-      //
-      // for each source file, issue the copy
-      //
-      for (; i < argv.length - 1; i++) {
+    }
+    //
+    // for each source file, issue the copy
+    //
+    for (; i < argv.length - 1; i++) {
+      try {
+        //
+        // issue the copy to the fs
+        //
+        copy(argv[i], dest, conf);
+      } catch (RemoteException e) {
+        //
+        // This is a error returned by hadoop server. Print
+        // out the first line of the error mesage.
+        //
+        exitCode = -1;
         try {
-          //
-          // issue the copy to the fs
-          //
-          copy(argv[i], dest, conf);
-        } catch (RemoteException e) {
-          //
-          // This is a error returned by hadoop server. Print
-          // out the first line of the error mesage.
-          //
-          exitCode = -1;
-          try {
-            String[] content;
-            content = e.getLocalizedMessage().split("\n");
-            System.err.println(cmd.substring(1) + ": " +
-                               content[0]);
-          } catch (Exception ex) {
-            System.err.println(cmd.substring(1) + ": " +
-                               ex.getLocalizedMessage());
-          }
-        } catch (IOException e) {
-          //
-          // IO exception encountered locally.
-          //
-          exitCode = -1;
+          String[] content;
+          content = e.getLocalizedMessage().split("\n");
+          System.err.println(cmd.substring(1) + ": " +
+                             content[0]);
+        } catch (Exception ex) {
           System.err.println(cmd.substring(1) + ": " +
-                             e.getLocalizedMessage());
+                             ex.getLocalizedMessage());
         }
+      } catch (IOException e) {
+        //
+        // IO exception encountered locally.
+        //
+        exitCode = -1;
+        System.err.println(cmd.substring(1) + ": " +
+                           e.getLocalizedMessage());
       }
-      return exitCode;
     }
+    return exitCode;
+  }
 
-    /**
-     * Delete all files that match the file pattern <i>srcf</i>.
-     * @param srcf a file pattern specifying source files
-     * @param recursive if need to delete subdirs
-     * @throws IOException  
-     * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
-     */
-    public void delete(String srcf, boolean recursive) throws IOException {
-      Path [] srcs = fs.globPaths( new Path(srcf) );
-      for( int i=0; i<srcs.length; i++ ) {
-        delete(srcs[i], recursive);
-      }
+  /**
+   * Delete all files that match the file pattern <i>srcf</i>.
+   * @param srcf a file pattern specifying source files
+   * @param recursive if need to delete subdirs
+   * @throws IOException  
+   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
+   */
+  public void delete(String srcf, boolean recursive) throws IOException {
+    Path [] srcs = fs.globPaths( new Path(srcf) );
+    for( int i=0; i<srcs.length; i++ ) {
+      delete(srcs[i], recursive);
     }
+  }
     
-    /* delete a file */
-    private void delete(Path src, boolean recursive ) throws IOException {
-      if (fs.isDirectory(src) && !recursive) {
-        throw new IOException("Cannot remove directory \"" + src +
-                           "\", use -rmr instead");
-      }
-
-      if (trash.moveToTrash(src)) {
-        System.out.println("Moved to trash: " + src);
-        return;
-      }
-      if (fs.delete(src)) {
-        System.out.println("Deleted " + src);
-      } else {
-        throw new IOException("Delete failed " + src);
-      }
+  /* delete a file */
+  private void delete(Path src, boolean recursive ) throws IOException {
+    if (fs.isDirectory(src) && !recursive) {
+      throw new IOException("Cannot remove directory \"" + src +
+                            "\", use -rmr instead");
     }
 
-    private void expunge() throws IOException {
-      trash.expunge();
-      trash.checkpoint();
+    if (trash.moveToTrash(src)) {
+      System.out.println("Moved to trash: " + src);
+      return;
     }
+    if (fs.delete(src)) {
+      System.out.println("Deleted " + src);
+    } else {
+      throw new IOException("Delete failed " + src);
+    }
+  }
 
-    /**
-     * Return an abbreviated English-language desc of the byte length
-     */
-    public static String byteDesc(long len) {
-        double val = 0.0;
-        String ending = "";
-        if (len < 1024 * 1024) {
-            val = (1.0 * len) / 1024;
-            ending = " k";
-        } else if (len < 1024 * 1024 * 1024) {
-            val = (1.0 * len) / (1024 * 1024);
-            ending = " MB";
-        } else if (len < 128L * 1024 * 1024 * 1024 ) {
-            val = (1.0 * len) / (1024 * 1024 * 1024);
-            ending = " GB";
-        } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
-            val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
-            ending = " TB";
-        } else {
-            val = (1.0 * len) / (1024L * 1024 * 1024 * 1024 * 1024);
-            ending = " PB";
-        }
-        return limitDecimal(val, 2) + ending;
+  private void expunge() throws IOException {
+    trash.expunge();
+    trash.checkpoint();
+  }
+
+  /**
+   * Return an abbreviated English-language desc of the byte length
+   */
+  public static String byteDesc(long len) {
+    double val = 0.0;
+    String ending = "";
+    if (len < 1024 * 1024) {
+      val = (1.0 * len) / 1024;
+      ending = " k";
+    } else if (len < 1024 * 1024 * 1024) {
+      val = (1.0 * len) / (1024 * 1024);
+      ending = " MB";
+    } else if (len < 128L * 1024 * 1024 * 1024 ) {
+      val = (1.0 * len) / (1024 * 1024 * 1024);
+      ending = " GB";
+    } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
+      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
+      ending = " TB";
+    } else {
+      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024 * 1024);
+      ending = " PB";
     }
+    return limitDecimal(val, 2) + ending;
+  }
 
-    public static String limitDecimal(double d, int placesAfterDecimal) {
-        String strVal = Double.toString(d);
-        int decpt = strVal.indexOf(".");
-        if (decpt >= 0) {
-            strVal = strVal.substring(0, Math.min(strVal.length(), decpt + 1 + placesAfterDecimal));
-        }
-        return strVal;
+  public static String limitDecimal(double d, int placesAfterDecimal) {
+    String strVal = Double.toString(d);
+    int decpt = strVal.indexOf(".");
+    if (decpt >= 0) {
+      strVal = strVal.substring(0, Math.min(strVal.length(), decpt + 1 + placesAfterDecimal));
     }
+    return strVal;
+  }
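For reference, a minimal sketch (illustrative only, not part of this commit) of how the two helpers above behave; it assumes FsShell lives in org.apache.hadoop.fs as elsewhere in this tree:

public class ByteDescDemo {
  public static void main(String[] args) {
    // 1536 bytes is below 1 MB, so it is reported in kilobytes: prints "1.5 k"
    System.out.println(org.apache.hadoop.fs.FsShell.byteDesc(1536L));
    // 3 * 2^30 bytes falls in the GB range: prints "3.0 GB"
    System.out.println(org.apache.hadoop.fs.FsShell.byteDesc(3L * 1024 * 1024 * 1024));
    // limitDecimal truncates rather than rounds: prints "2.71"
    System.out.println(org.apache.hadoop.fs.FsShell.limitDecimal(2.71828, 2));
  }
}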
 
-    private void printHelp(String cmd) {
-        String summary = "hadoop fs is the command to execute fs commands. " +
-            "The full syntax is: \n\n" +
-            "hadoop fs [-fs <local | file system URI>] [-conf <configuration file>]\n\t" +
-            "[-D <property=value>] [-ls <path>] [-lsr <path>] [-du <path>]\n\t" + 
-            "[-dus <path>] [-mv <src> <dst>] [-cp <src> <dst>] [-rm <src>]\n\t" + 
-            "[-rmr <src>] [-put <localsrc> <dst>] [-copyFromLocal <localsrc> <dst>]\n\t" +
-            "[-moveFromLocal <localsrc> <dst>] [-get <src> <localdst>]\n\t" +
-            "[-getmerge <src> <localdst> [addnl]] [-cat <src>]\n\t" +
-            "[-copyToLocal <src><localdst>] [-moveToLocal <src> <localdst>]\n\t" +
-            "[-mkdir <path>] [-report] [-setrep [-R] <rep> <path/file>]\n" +
-            "[-help [cmd]]\n"; 
+  private void printHelp(String cmd) {
+    String summary = "hadoop fs is the command to execute fs commands. " +
+      "The full syntax is: \n\n" +
+      "hadoop fs [-fs <local | file system URI>] [-conf <configuration file>]\n\t" +
+      "[-D <property=value>] [-ls <path>] [-lsr <path>] [-du <path>]\n\t" + 
+      "[-dus <path>] [-mv <src> <dst>] [-cp <src> <dst>] [-rm <src>]\n\t" + 
+      "[-rmr <src>] [-put <localsrc> <dst>] [-copyFromLocal <localsrc> <dst>]\n\t" +
+      "[-moveFromLocal <localsrc> <dst>] [-get <src> <localdst>]\n\t" +
+      "[-getmerge <src> <localdst> [addnl]] [-cat <src>]\n\t" +
+      "[-copyToLocal <src><localdst>] [-moveToLocal <src> <localdst>]\n\t" +
+      "[-mkdir <path>] [-report] [-setrep [-R] <rep> <path/file>]\n" +
+      "[-help [cmd]]\n"; 
 
-        String conf ="-conf <configuration file>:  Specify an application configuration file.";
+    String conf ="-conf <configuration file>:  Specify an application configuration file.";
  
-        String D = "-D <property=value>:  Use value for given property.";
+    String D = "-D <property=value>:  Use value for given property.";
   
-        String fs = "-fs [local | <file system URI>]: \tSpecify the file system to use.\n" + 
-            "\t\tIf not specified, the current configuration is used, \n" +
-            "\t\ttaken from the following, in increasing precedence: \n" + 
-            "\t\t\thadoop-default.xml inside the hadoop jar file \n" +
-            "\t\t\thadoop-default.xml in $HADOOP_CONF_DIR \n" +
-            "\t\t\thadoop-site.xml in $HADOOP_CONF_DIR \n" +
-            "\t\t'local' means use the local file system as your DFS. \n" +
-            "\t\t<file system URI> specifies a particular file system to \n" +
-            "\t\tcontact. This argument is optional but if used must appear\n" +
-            "\t\tappear first on the command line.  Exactly one additional\n" +
-            "\t\targument must be specified. \n";
+    String fs = "-fs [local | <file system URI>]: \tSpecify the file system to use.\n" + 
+      "\t\tIf not specified, the current configuration is used, \n" +
+      "\t\ttaken from the following, in increasing precedence: \n" + 
+      "\t\t\thadoop-default.xml inside the hadoop jar file \n" +
+      "\t\t\thadoop-default.xml in $HADOOP_CONF_DIR \n" +
+      "\t\t\thadoop-site.xml in $HADOOP_CONF_DIR \n" +
+      "\t\t'local' means use the local file system as your DFS. \n" +
+      "\t\t<file system URI> specifies a particular file system to \n" +
+      "\t\tcontact. This argument is optional but if used must appear\n" +
+      "\t\tappear first on the command line.  Exactly one additional\n" +
+      "\t\targument must be specified. \n";
 
         
-        String ls = "-ls <path>: \tList the contents that match the specified file pattern. If\n" + 
-            "\t\tpath is not specified, the contents of /user/<currentUser>\n" +
-            "\t\twill be listed. Directory entries are of the form \n" +
-            "\t\t\tdirName (full path) <dir> \n" +
-            "\t\tand file entries are of the form \n" + 
-            "\t\t\tfileName(full path) <r n> size \n" +
-            "\t\twhere n is the number of replicas specified for the file \n" + 
-            "\t\tand size is the size of the file, in bytes.\n";
+    String ls = "-ls <path>: \tList the contents that match the specified file pattern. If\n" + 
+      "\t\tpath is not specified, the contents of /user/<currentUser>\n" +
+      "\t\twill be listed. Directory entries are of the form \n" +
+      "\t\t\tdirName (full path) <dir> \n" +
+      "\t\tand file entries are of the form \n" + 
+      "\t\t\tfileName(full path) <r n> size \n" +
+      "\t\twhere n is the number of replicas specified for the file \n" + 
+      "\t\tand size is the size of the file, in bytes.\n";
 
-        String lsr = "-lsr <path>: \tRecursively list the contents that match the specified\n" +
-            "\t\tfile pattern.  Behaves very similarly to hadoop fs -ls,\n" + 
-            "\t\texcept that the data is shown for all the entries in the\n" +
-            "\t\tsubtree.\n";
+    String lsr = "-lsr <path>: \tRecursively list the contents that match the specified\n" +
+      "\t\tfile pattern.  Behaves very similarly to hadoop fs -ls,\n" + 
+      "\t\texcept that the data is shown for all the entries in the\n" +
+      "\t\tsubtree.\n";
 
-        String du = "-du <path>: \tShow the amount of space, in bytes, used by the files that \n" +
-            "\t\tmatch the specified file pattern.  Equivalent to the unix\n" + 
-            "\t\tcommand \"du -sb <path>/*\" in case of a directory, \n" +
-            "\t\tand to \"du -b <path>\" in case of a file.\n" +
-            "\t\tThe output is in the form \n" + 
-            "\t\t\tname(full path) size (in bytes)\n"; 
+    String du = "-du <path>: \tShow the amount of space, in bytes, used by the files that \n" +
+      "\t\tmatch the specified file pattern.  Equivalent to the unix\n" + 
+      "\t\tcommand \"du -sb <path>/*\" in case of a directory, \n" +
+      "\t\tand to \"du -b <path>\" in case of a file.\n" +
+      "\t\tThe output is in the form \n" + 
+      "\t\t\tname(full path) size (in bytes)\n"; 
 
-        String dus = "-dus <path>: \tShow the amount of space, in bytes, used by the files that \n" +
-            "\t\tmatch the specified file pattern.  Equivalent to the unix\n" + 
-            "\t\tcommand \"du -sb\"  The output is in the form \n" + 
-            "\t\t\tname(full path) size (in bytes)\n"; 
+    String dus = "-dus <path>: \tShow the amount of space, in bytes, used by the files that \n" +
+      "\t\tmatch the specified file pattern.  Equivalent to the unix\n" + 
+      "\t\tcommand \"du -sb\"  The output is in the form \n" + 
+      "\t\t\tname(full path) size (in bytes)\n"; 
     
-        String mv = "-mv <src> <dst>:   Move files that match the specified file pattern <src>\n" +
-            "\t\tto a destination <dst>.  When moving multiple files, the \n" +
-            "\t\tdestination must be a directory. \n";
+    String mv = "-mv <src> <dst>:   Move files that match the specified file pattern <src>\n" +
+      "\t\tto a destination <dst>.  When moving multiple files, the \n" +
+      "\t\tdestination must be a directory. \n";
 
-        String cp = "-cp <src> <dst>:   Copy files that match the file pattern <src> to a \n" +
-            "\t\tdestination.  When copying multiple files, the destination\n" +
-            "\t\tmust be a directory. \n";
+    String cp = "-cp <src> <dst>:   Copy files that match the file pattern <src> to a \n" +
+      "\t\tdestination.  When copying multiple files, the destination\n" +
+      "\t\tmust be a directory. \n";
 
-        String rm = "-rm <src>: \tDelete all files that match the specified file pattern.\n" +
-            "\t\tEquivlent to the Unix command \"rm <src>\"\n";
+    String rm = "-rm <src>: \tDelete all files that match the specified file pattern.\n" +
+      "\t\tEquivlent to the Unix command \"rm <src>\"\n";
 
-        String rmr = "-rmr <src>: \tRemove all directories which match the specified file \n" +
-            "\t\tpattern. Equivlent to the Unix command \"rm -rf <src>\"\n";
+    String rmr = "-rmr <src>: \tRemove all directories which match the specified file \n" +
+      "\t\tpattern. Equivlent to the Unix command \"rm -rf <src>\"\n";
 
-        String put = "-put <localsrc> <dst>: \tCopy a single file from the local file system \n" +
-            "\t\tinto fs. \n";
+    String put = "-put <localsrc> <dst>: \tCopy a single file from the local file system \n" +
+      "\t\tinto fs. \n";
 
-        String copyFromLocal = "-copyFromLocal <localsrc> <dst>:  Identical to the -put command.\n";
+    String copyFromLocal = "-copyFromLocal <localsrc> <dst>:  Identical to the -put command.\n";
 
-        String moveFromLocal = "-moveFromLocal <localsrc> <dst>:  Same as -put, except that the source is\n" +
-            "\t\tdeleted after it's copied.\n"; 
+    String moveFromLocal = "-moveFromLocal <localsrc> <dst>:  Same as -put, except that the source is\n" +
+      "\t\tdeleted after it's copied.\n"; 
 
-        String get = "-get <src> <localdst>:  Copy files that match the file pattern <src> \n" +
-            "\t\tto the local name.  <src> is kept.  When copying mutiple, \n" +
-            "\t\tfiles, the destination must be a directory. \n";
+    String get = "-get <src> <localdst>:  Copy files that match the file pattern <src> \n" +
+      "\t\tto the local name.  <src> is kept.  When copying mutiple, \n" +
+      "\t\tfiles, the destination must be a directory. \n";
 
-        String getmerge = "-getmerge <src> <localdst>:  Get all the files in the directories that \n" +
-            "\t\tmatch the source file pattern and merge and sort them to only\n" +
-            "\t\tone file on local fs. <src> is kept.\n";
+    String getmerge = "-getmerge <src> <localdst>:  Get all the files in the directories that \n" +
+      "\t\tmatch the source file pattern and merge and sort them to only\n" +
+      "\t\tone file on local fs. <src> is kept.\n";
 
-        String cat = "-cat <src>: \tFetch all files that match the file pattern <src> \n" +
-            "\t\tand display their content on stdout.\n";
+    String cat = "-cat <src>: \tFetch all files that match the file pattern <src> \n" +
+      "\t\tand display their content on stdout.\n";
         
-        String copyToLocal = "-copyToLocal <src> <localdst>:  Identical to the -get command.\n";
+    String copyToLocal = "-copyToLocal <src> <localdst>:  Identical to the -get command.\n";
 
-        String moveToLocal = "-moveToLocal <src> <localdst>:  Not implemented yet \n";
+    String moveToLocal = "-moveToLocal <src> <localdst>:  Not implemented yet \n";
         
-        String mkdir = "-mkdir <path>: \tCreate a directory in specified location. \n";
+    String mkdir = "-mkdir <path>: \tCreate a directory in specified location. \n";
 
-        String setrep = "-setrep [-R] <rep> <path/file>:  Set the replication level of a file. \n" +
-            "\t\tThe -R flag requests a recursive change of replication level \n" + 
-            "\t\tfor an entire tree.\n"; 
+    String setrep = "-setrep [-R] <rep> <path/file>:  Set the replication level of a file. \n" +
+      "\t\tThe -R flag requests a recursive change of replication level \n" + 
+      "\t\tfor an entire tree.\n"; 
         
-        String help = "-help [cmd]: \tDisplays help for given command or all commands if none\n" +
-            "\t\tis specified.\n";
+    String help = "-help [cmd]: \tDisplays help for given command or all commands if none\n" +
+      "\t\tis specified.\n";
 
-        if ("fs".equals(cmd)) {
-            System.out.println(fs);
-        } else if ("conf".equals(cmd)) {
-            System.out.println(conf);
-        } else if ("D".equals(cmd)) {
-            System.out.println(D);
-        } else if ("ls".equals(cmd)) {
-            System.out.println(ls);
-        } else if ("lsr".equals(cmd)) {
-            System.out.println(lsr);
-        } else if ("du".equals(cmd)) {
-            System.out.println(du);
-        } else if ("dus".equals(cmd)) {
-            System.out.println(dus);
-        } else if ("rm".equals(cmd)) {
-            System.out.println(rm);
-        } else if ("rmr".equals(cmd)) {
-            System.out.println(rmr);
-        } else if ("mkdir".equals(cmd)) {
-            System.out.println(mkdir);
-        } else if ("mv".equals(cmd)) {
-            System.out.println(mv);
-        } else if ("cp".equals(cmd)) {
-            System.out.println(cp);
-        } else if ("put".equals(cmd)) {
-            System.out.println(put);
-        } else if ("copyFromLocal".equals(cmd)) {
-            System.out.println(copyFromLocal);
-        } else if ("moveFromLocal".equals(cmd)) {
-            System.out.println(moveFromLocal);
-        } else if ("get".equals(cmd)) {
-            System.out.println(get);
-        } else if ("getmerge".equals(cmd)) {
-            System.out.println(getmerge);
-        } else if ("copyToLocal".equals(cmd)) {
-            System.out.println(copyToLocal);
-        } else if ("moveToLocal".equals(cmd)) {
-            System.out.println(moveToLocal);
-        } else if ("cat".equals(cmd)) {
-            System.out.println(cat);
-        } else if ("get".equals(cmd)) {
-            System.out.println(get);
-        } else if ("setrep".equals(cmd)) {
-            System.out.println(setrep);
-        } else if ("help".equals(cmd)) {
-            System.out.println(help);
-        } else {
-            System.out.println(summary);
-            System.out.println(fs);
-            System.out.println(ls);
-            System.out.println(lsr);
-            System.out.println(du);
-            System.out.println(dus);
-            System.out.println(mv);
-            System.out.println(cp);
-            System.out.println(rm);
-            System.out.println(rmr);
-            System.out.println(put);
-            System.out.println(copyFromLocal);
-            System.out.println(moveFromLocal);
-            System.out.println(get);
-            System.out.println(getmerge);
-            System.out.println(cat);
-            System.out.println(copyToLocal);
-            System.out.println(moveToLocal);
-            System.out.println(mkdir);
-            System.out.println(setrep);
-            System.out.println(help);
-        }        
+    if ("fs".equals(cmd)) {
+      System.out.println(fs);
+    } else if ("conf".equals(cmd)) {
+      System.out.println(conf);
+    } else if ("D".equals(cmd)) {
+      System.out.println(D);
+    } else if ("ls".equals(cmd)) {
+      System.out.println(ls);
+    } else if ("lsr".equals(cmd)) {
+      System.out.println(lsr);
+    } else if ("du".equals(cmd)) {
+      System.out.println(du);
+    } else if ("dus".equals(cmd)) {
+      System.out.println(dus);
+    } else if ("rm".equals(cmd)) {
+      System.out.println(rm);
+    } else if ("rmr".equals(cmd)) {
+      System.out.println(rmr);
+    } else if ("mkdir".equals(cmd)) {
+      System.out.println(mkdir);
+    } else if ("mv".equals(cmd)) {
+      System.out.println(mv);
+    } else if ("cp".equals(cmd)) {
+      System.out.println(cp);
+    } else if ("put".equals(cmd)) {
+      System.out.println(put);
+    } else if ("copyFromLocal".equals(cmd)) {
+      System.out.println(copyFromLocal);
+    } else if ("moveFromLocal".equals(cmd)) {
+      System.out.println(moveFromLocal);
+    } else if ("get".equals(cmd)) {
+      System.out.println(get);
+    } else if ("getmerge".equals(cmd)) {
+      System.out.println(getmerge);
+    } else if ("copyToLocal".equals(cmd)) {
+      System.out.println(copyToLocal);
+    } else if ("moveToLocal".equals(cmd)) {
+      System.out.println(moveToLocal);
+    } else if ("cat".equals(cmd)) {
+      System.out.println(cat);
+    } else if ("get".equals(cmd)) {
+      System.out.println(get);
+    } else if ("setrep".equals(cmd)) {
+      System.out.println(setrep);
+    } else if ("help".equals(cmd)) {
+      System.out.println(help);
+    } else {
+      System.out.println(summary);
+      System.out.println(fs);
+      System.out.println(ls);
+      System.out.println(lsr);
+      System.out.println(du);
+      System.out.println(dus);
+      System.out.println(mv);
+      System.out.println(cp);
+      System.out.println(rm);
+      System.out.println(rmr);
+      System.out.println(put);
+      System.out.println(copyFromLocal);
+      System.out.println(moveFromLocal);
+      System.out.println(get);
+      System.out.println(getmerge);
+      System.out.println(cat);
+      System.out.println(copyToLocal);
+      System.out.println(moveToLocal);
+      System.out.println(mkdir);
+      System.out.println(setrep);
+      System.out.println(help);
+    }        
 
                            
-    }
+  }
 
-    /**
-     * Apply operation specified by 'cmd' on all parameters
-     * starting from argv[startindex].
-     */
-    private int doall(String cmd, String argv[], Configuration conf, 
-                      int startindex) {
-      int exitCode = 0;
-      int i = startindex;
-      //
-      // for each source file, issue the command
-      //
-      for (; i < argv.length; i++) {
+  /**
+   * Apply operation specified by 'cmd' on all parameters
+   * starting from argv[startindex].
+   */
+  private int doall(String cmd, String argv[], Configuration conf, 
+                    int startindex) {
+    int exitCode = 0;
+    int i = startindex;
+    //
+    // for each source file, issue the command
+    //
+    for (; i < argv.length; i++) {
+      try {
+        //
+        // issue the command to the fs
+        //
+        if ("-cat".equals(cmd)) {
+          cat(argv[i]);
+        } else if ("-mkdir".equals(cmd)) {
+          mkdir(argv[i]);
+        } else if ("-rm".equals(cmd)) {
+          delete(argv[i], false);
+        } else if ("-rmr".equals(cmd)) {
+          delete(argv[i], true);
+        } else if ("-du".equals(cmd)) {
+          du(argv[i]);
+        } else if ("-dus".equals(cmd)) {
+          dus(argv[i]);
+        } else if ("-ls".equals(cmd)) {
+          ls(argv[i], false);
+        } else if ("-lsr".equals(cmd)) {
+          ls(argv[i], true);
+        }
+      } catch (RemoteException e) {
+        //
+        // This is an error returned by the hadoop server. Print
+        // out the first line of the error message.
+        //
+        exitCode = -1;
         try {
-          //
-          // issue the command to the fs
-          //
-          if ("-cat".equals(cmd)) {
-              cat(argv[i]);
-          } else if ("-mkdir".equals(cmd)) {
-              mkdir(argv[i]);
-          } else if ("-rm".equals(cmd)) {
-              delete(argv[i], false);
-          } else if ("-rmr".equals(cmd)) {
-              delete(argv[i], true);
-          } else if ("-du".equals(cmd)) {
-              du(argv[i]);
-          } else if ("-dus".equals(cmd)) {
-              dus(argv[i]);
-          } else if ("-ls".equals(cmd)) {
-              ls(argv[i], false);
-          } else if ("-lsr".equals(cmd)) {
-              ls(argv[i], true);
-          }
-        } catch (RemoteException e) {
-          //
-          // This is an error returned by the hadoop server. Print
-          // out the first line of the error message.
-          //
-          exitCode = -1;
-          try {
-            String[] content;
-            content = e.getLocalizedMessage().split("\n");
-            System.err.println(cmd.substring(1) + ": " +
-                               content[0]);
-          } catch (Exception ex) {
-            System.err.println(cmd.substring(1) + ": " +
-                               ex.getLocalizedMessage());
-          }
-        } catch (IOException e) {
-          //
-          // IO exception encountered locally.
-          //
-          exitCode = -1;
+          String[] content;
+          content = e.getLocalizedMessage().split("\n");
           System.err.println(cmd.substring(1) + ": " +
-                             e.getLocalizedMessage());
+                             content[0]);
+        } catch (Exception ex) {
+          System.err.println(cmd.substring(1) + ": " +
+                             ex.getLocalizedMessage());
         }
+      } catch (IOException e) {
+        //
+        // IO exception encountered locally.
+        //
+        exitCode = -1;
+        System.err.println(cmd.substring(1) + ": " +
+                           e.getLocalizedMessage());
       }
-      return exitCode;
     }
+    return exitCode;
+  }
 
-    /**
-     * Displays format of commands.
-     * 
-     */
-    public void printUsage(String cmd) {
-          if ("-fs".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [-fs <local | file system URI>]");
-          } else if ("-conf".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [-conf <configuration file>]");
-          } else if ("-D".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [-D <[property=value>]");
-          } else if ("-ls".equals(cmd) || "-lsr".equals(cmd) ||
-                   "-du".equals(cmd) || "-dus".equals(cmd) || 
-                   "-rm".equals(cmd) || "-rmr".equals(cmd) || 
-                   "-mkdir".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [" + cmd + " <path>]");
-          } else if ("-mv".equals(cmd) || "-cp".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [" + cmd + " <src> <dst>]");
-          } else if ("-put".equals(cmd) || "-copyFromLocal".equals(cmd) ||
-                   "-moveFromLocal".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [" + cmd + " <localsrc> <dst>]");
-          } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd) ||
-                   "-moveToLocal".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [" + cmd + " [-crc] <src> <localdst>]");
-          } else if ("-cat".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [" + cmd + " <src>]");
-          } else if ("-get".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [" + cmd + " <src> <localdst> [addnl]]");
-          } else if ("-setrep".equals(cmd)) {
-            System.err.println("Usage: java FsShell" + 
-                " [-setrep [-R] <rep> <path/file>]");
-          } else {
-            System.err.println("Usage: java FsShell");
-            System.err.println("           [-fs <local | file system URI>]");
-            System.err.println("           [-conf <configuration file>]");
-            System.err.println("           [-D <[property=value>]");
-            System.err.println("           [-ls <path>]" );
-            System.err.println("           [-lsr <path>]");
-            System.err.println("           [-du <path>]");
-            System.err.println("           [-dus <path>]");
-            System.err.println("           [-mv <src> <dst>]");
-            System.err.println("           [-cp <src> <dst>]");
-            System.err.println("           [-rm <path>]");
-            System.err.println("           [-rmr <path>]");
-            System.err.println("           [-expunge]");
-            System.err.println("           [-put <localsrc> <dst>]");
-            System.err.println("           [-copyFromLocal <localsrc> <dst>]");
-            System.err.println("           [-moveFromLocal <localsrc> <dst>]");
-            System.err.println("           [-get [-crc] <src> <localdst>]");
-            System.err.println("           [-getmerge <src> <localdst> [addnl]]");
-            System.err.println("           [-cat <src>]");
-            System.err.println("           [-copyToLocal [-crc] <src> <localdst>]");
-            System.err.println("           [-moveToLocal [-crc] <src> <localdst>]");
-            System.err.println("           [-mkdir <path>]");
-            System.err.println("           [-setrep [-R] <rep> <path/file>]");
-            System.err.println("           [-help [cmd]]");
-          }
+  /**
+   * Displays format of commands.
+   * 
+   */
+  public void printUsage(String cmd) {
+    if ("-fs".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [-fs <local | file system URI>]");
+    } else if ("-conf".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [-conf <configuration file>]");
+    } else if ("-D".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [-D <[property=value>]");
+    } else if ("-ls".equals(cmd) || "-lsr".equals(cmd) ||
+               "-du".equals(cmd) || "-dus".equals(cmd) || 
+               "-rm".equals(cmd) || "-rmr".equals(cmd) || 
+               "-mkdir".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [" + cmd + " <path>]");
+    } else if ("-mv".equals(cmd) || "-cp".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [" + cmd + " <src> <dst>]");
+    } else if ("-put".equals(cmd) || "-copyFromLocal".equals(cmd) ||
+               "-moveFromLocal".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [" + cmd + " <localsrc> <dst>]");
+    } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd) ||
+               "-moveToLocal".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [" + cmd + " [-crc] <src> <localdst>]");
+    } else if ("-cat".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [" + cmd + " <src>]");
+    } else if ("-get".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [" + cmd + " <src> <localdst> [addnl]]");
+    } else if ("-setrep".equals(cmd)) {
+      System.err.println("Usage: java FsShell" + 
+                         " [-setrep [-R] <rep> <path/file>]");
+    } else {
+      System.err.println("Usage: java FsShell");
+      System.err.println("           [-fs <local | file system URI>]");
+      System.err.println("           [-conf <configuration file>]");
+      System.err.println("           [-D <[property=value>]");
+      System.err.println("           [-ls <path>]" );
+      System.err.println("           [-lsr <path>]");
+      System.err.println("           [-du <path>]");
+      System.err.println("           [-dus <path>]");
+      System.err.println("           [-mv <src> <dst>]");
+      System.err.println("           [-cp <src> <dst>]");
+      System.err.println("           [-rm <path>]");
+      System.err.println("           [-rmr <path>]");
+      System.err.println("           [-expunge]");
+      System.err.println("           [-put <localsrc> <dst>]");
+      System.err.println("           [-copyFromLocal <localsrc> <dst>]");
+      System.err.println("           [-moveFromLocal <localsrc> <dst>]");
+      System.err.println("           [-get [-crc] <src> <localdst>]");
+      System.err.println("           [-getmerge <src> <localdst> [addnl]]");
+      System.err.println("           [-cat <src>]");
+      System.err.println("           [-copyToLocal [-crc] <src> <localdst>]");
+      System.err.println("           [-moveToLocal [-crc] <src> <localdst>]");
+      System.err.println("           [-mkdir <path>]");
+      System.err.println("           [-setrep [-R] <rep> <path/file>]");
+      System.err.println("           [-help [cmd]]");
     }
+  }
 
-    /**
-     * run
-     */
-    public int run( String argv[] ) throws Exception {
+  /**
+   * run
+   */
+  public int run( String argv[] ) throws Exception {
 
-        if (argv.length < 1) {
-            printUsage(""); 
-            return -1;
-        }
+    if (argv.length < 1) {
+      printUsage(""); 
+      return -1;
+    }
 
-        int exitCode = -1;
-        int i = 0;
-        String cmd = argv[i++];
+    int exitCode = -1;
+    int i = 0;
+    String cmd = argv[i++];
 
-        //
-        // verify that we have enough command line parameters
-        //
-        if ("-put".equals(cmd) || 
-            "-copyFromLocal".equals(cmd) || "-moveFromLocal".equals(cmd)) {
-                if (argv.length != 3) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-get".equals(cmd) || 
-            "-copyToLocal".equals(cmd) || "-moveToLocal".equals(cmd)) {
-                if (argv.length < 3) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-mv".equals(cmd) || "-cp".equals(cmd)) {
-                if (argv.length < 3) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        } else if ("-rm".equals(cmd) || "-rmr".equals(cmd) ||
-                   "-cat".equals(cmd) || "-mkdir".equals(cmd)) {
-                if (argv.length < 2) {
-                  printUsage(cmd);
-                  return exitCode;
-                }
-        }
+    //
+    // verify that we have enough command line parameters
+    //
+    if ("-put".equals(cmd) || 
+        "-copyFromLocal".equals(cmd) || "-moveFromLocal".equals(cmd)) {
+      if (argv.length != 3) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-get".equals(cmd) || 
+               "-copyToLocal".equals(cmd) || "-moveToLocal".equals(cmd)) {
+      if (argv.length < 3) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-mv".equals(cmd) || "-cp".equals(cmd)) {
+      if (argv.length < 3) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    } else if ("-rm".equals(cmd) || "-rmr".equals(cmd) ||
+               "-cat".equals(cmd) || "-mkdir".equals(cmd)) {
+      if (argv.length < 2) {
+        printUsage(cmd);
+        return exitCode;
+      }
+    }
 
-        // initialize FsShell
-        try {
-            init();
-        } catch (RPC.VersionMismatch v) { 
-            System.err.println("Version Mismatch between client and server" +
-                               "... command aborted.");
-            return exitCode;
-        } catch (IOException e) {
-            System.err.println("Bad connection to FS. command aborted.");
-            return exitCode;
-        }
+    // initialize FsShell
+    try {
+      init();
+    } catch (RPC.VersionMismatch v) { 
+      System.err.println("Version Mismatch between client and server" +
+                         "... command aborted.");
+      return exitCode;
+    } catch (IOException e) {
+      System.err.println("Bad connection to FS. command aborted.");
+      return exitCode;
+    }
 
-        exitCode = 0;
-        try {
-            if ("-put".equals(cmd) || "-copyFromLocal".equals(cmd)) {
-                copyFromLocal(new Path(argv[i++]), argv[i++]);
-            } else if ("-moveFromLocal".equals(cmd)) {
-                moveFromLocal(new Path(argv[i++]), argv[i++]);
-            } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd)) {
-                copyToLocal(argv, i);
-            } else if ("-getmerge".equals(cmd)) {
-                if(argv.length>i+2)
-                    copyMergeToLocal(argv[i++], new Path(argv[i++]), Boolean.parseBoolean(argv[i++]));
-                else
-                    copyMergeToLocal(argv[i++], new Path(argv[i++]));
-            } else if ("-cat".equals(cmd)) {
-                exitCode = doall(cmd, argv, conf, i);
-            } else if ("-moveToLocal".equals(cmd)) {
-                moveToLocal(argv[i++], new Path(argv[i++]));
-            } else if ("-setrep".equals(cmd)) {
-            	setReplication(argv, i);           
-            } else if ("-ls".equals(cmd)) {
-                if (i < argv.length) {
-                    exitCode = doall(cmd, argv, conf, i);
-                } else {
-                    ls("", false);
-                } 
-            } else if ("-lsr".equals(cmd)) {
-                if (i < argv.length) {
-                    exitCode = doall(cmd, argv, conf, i);
-                } else {
-                    ls("", true);
-                } 
-            } else if ("-mv".equals(cmd)) {
-                exitCode = rename(argv, conf);
-            } else if ("-cp".equals(cmd)) {
-                exitCode = copy(argv, conf);
-            } else if ("-rm".equals(cmd)) {
-                exitCode = doall(cmd, argv, conf, i);
-            } else if ("-rmr".equals(cmd)) {
-                exitCode = doall(cmd, argv, conf, i);
-            } else if ("-expunge".equals(cmd)) {
-                expunge();
-            } else if ("-du".equals(cmd)) {
-                if (i < argv.length) {
-                    exitCode = doall(cmd, argv, conf, i);
-                } else {
-                    du("");
-                }
-            } else if( "-dus".equals(cmd)) {
-              if (i < argv.length) {
-                  exitCode = doall(cmd, argv, conf, i);
-              } else {
-                  dus("");
-              }         
-            } else if ("-mkdir".equals(cmd)) {
-                exitCode = doall(cmd, argv, conf, i);
-            } else if ("-help".equals(cmd)) {
-                if (i < argv.length) {
-                    printHelp(argv[i]);
-                } else {
-                    printHelp("");
-                }
-            } else {
-                exitCode = -1;
-                System.err.println(cmd.substring(1) + ": Unknown command");
-                printUsage("");
-            }
-        } catch (RemoteException e) {
-          //
-          // This is an error returned by the hadoop server. Print
-          // out the first line of the error message, ignore the stack trace.
-          exitCode = -1;
-          try {
-            String[] content;
-            content = e.getLocalizedMessage().split("\n");
-            System.err.println(cmd.substring(1) + ": " + 
-                               content[0]);
-          } catch (Exception ex) {
-            System.err.println(cmd.substring(1) + ": " + 
-                               ex.getLocalizedMessage());  
-          }
-        } catch (IOException e ) {
-          //
-          // IO exception encountered locally.
-          // 
-          exitCode = -1;
-          System.err.println(cmd.substring(1) + ": " + 
-                             e.getLocalizedMessage());  
-        } finally {
-            fs.close();
+    exitCode = 0;
+    try {
+      if ("-put".equals(cmd) || "-copyFromLocal".equals(cmd)) {
+        copyFromLocal(new Path(argv[i++]), argv[i++]);
+      } else if ("-moveFromLocal".equals(cmd)) {
+        moveFromLocal(new Path(argv[i++]), argv[i++]);
+      } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd)) {
+        copyToLocal(argv, i);
+      } else if ("-getmerge".equals(cmd)) {
+        if(argv.length>i+2)
+          copyMergeToLocal(argv[i++], new Path(argv[i++]), Boolean.parseBoolean(argv[i++]));
+        else
+          copyMergeToLocal(argv[i++], new Path(argv[i++]));
+      } else if ("-cat".equals(cmd)) {
+        exitCode = doall(cmd, argv, conf, i);
+      } else if ("-moveToLocal".equals(cmd)) {
+        moveToLocal(argv[i++], new Path(argv[i++]));
+      } else if ("-setrep".equals(cmd)) {
+        setReplication(argv, i);           
+      } else if ("-ls".equals(cmd)) {
+        if (i < argv.length) {
+          exitCode = doall(cmd, argv, conf, i);
+        } else {
+          ls("", false);
+        } 
+      } else if ("-lsr".equals(cmd)) {
+        if (i < argv.length) {
+          exitCode = doall(cmd, argv, conf, i);
+        } else {
+          ls("", true);
+        } 
+      } else if ("-mv".equals(cmd)) {
+        exitCode = rename(argv, conf);
+      } else if ("-cp".equals(cmd)) {
+        exitCode = copy(argv, conf);
+      } else if ("-rm".equals(cmd)) {
+        exitCode = doall(cmd, argv, conf, i);
+      } else if ("-rmr".equals(cmd)) {
+        exitCode = doall(cmd, argv, conf, i);
+      } else if ("-expunge".equals(cmd)) {
+        expunge();
+      } else if ("-du".equals(cmd)) {
+        if (i < argv.length) {
+          exitCode = doall(cmd, argv, conf, i);
+        } else {
+          du("");
         }
-        return exitCode;
+      } else if( "-dus".equals(cmd)) {
+        if (i < argv.length) {
+          exitCode = doall(cmd, argv, conf, i);
+        } else {
+          dus("");
+        }         
+      } else if ("-mkdir".equals(cmd)) {
+        exitCode = doall(cmd, argv, conf, i);
+      } else if ("-help".equals(cmd)) {
+        if (i < argv.length) {
+          printHelp(argv[i]);
+        } else {
+          printHelp("");
+        }
+      } else {
+        exitCode = -1;
+        System.err.println(cmd.substring(1) + ": Unknown command");
+        printUsage("");
+      }
+    } catch (RemoteException e) {
+      //
+      // This is an error returned by the hadoop server. Print
+      // out the first line of the error message, ignore the stack trace.
+      exitCode = -1;
+      try {
+        String[] content;
+        content = e.getLocalizedMessage().split("\n");
+        System.err.println(cmd.substring(1) + ": " + 
+                           content[0]);
+      } catch (Exception ex) {
+        System.err.println(cmd.substring(1) + ": " + 
+                           ex.getLocalizedMessage());  
+      }
+    } catch (IOException e ) {
+      //
+      // IO exception encountered locally.
+      // 
+      exitCode = -1;
+      System.err.println(cmd.substring(1) + ": " + 
+                         e.getLocalizedMessage());  
+    } finally {
+      fs.close();
     }
+    return exitCode;
+  }
 
-    /**
-     * main() has some simple utility methods
-     */
-    public static void main(String argv[]) throws Exception {
-        int res = new FsShell().doMain(new Configuration(), argv);
-        System.exit(res);
-    }
+  /**
+   * main() has some simple utility methods
+   */
+  public static void main(String argv[]) throws Exception {
+    int res = new FsShell().doMain(new Configuration(), argv);
+    System.exit(res);
+  }
 }
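A usage sketch (illustrative only, not part of this commit): because doall() applies the command to every remaining argument and keeps going on per-path failures, one invocation can cover several paths. The example assumes FsShell is in org.apache.hadoop.fs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsShell;

public class RmManyDemo {
  public static void main(String[] args) throws Exception {
    // Removes both files; the exit code is -1 if any individual delete fails.
    int res = new FsShell().doMain(new Configuration(),
                                   new String[] { "-rm", "/tmp/a.txt", "/tmp/b.txt" });
    System.exit(res);
  }
}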

+ 340 - 340
src/java/org/apache/hadoop/fs/InMemoryFileSystem.java

@@ -38,447 +38,447 @@ import org.apache.hadoop.util.Progressable;
  */
 public class InMemoryFileSystem extends ChecksumFileSystem {
   private static class RawInMemoryFileSystem extends FileSystem {
-  private URI uri;
-  private int fsSize;
-  private volatile int totalUsed;
-  private Path staticWorkingDir;
+    private URI uri;
+    private int fsSize;
+    private volatile int totalUsed;
+    private Path staticWorkingDir;
   
-  //pathToFileAttribs is the final place where a file is put after it is closed
-  private Map <String, FileAttributes> pathToFileAttribs = new HashMap();
+    //pathToFileAttribs is the final place where a file is put after it is closed
+    private Map <String, FileAttributes> pathToFileAttribs = new HashMap();
   
-  //tempFileAttribs is a temp place which is updated while reserving memory for
-  //files we are going to create. It is read in the create method and the
-  //temp key/value is discarded. If the file makes it to "close", then it
-  //ends up being in the pathToFileAttribs map.
-  private Map <String, FileAttributes> tempFileAttribs = new HashMap();
+    //tempFileAttribs is a temp place which is updated while reserving memory for
+    //files we are going to create. It is read in the create method and the
+    //temp key/value is discarded. If the file makes it to "close", then it
+    //ends up being in the pathToFileAttribs map.
+    private Map <String, FileAttributes> tempFileAttribs = new HashMap();
   
-  public RawInMemoryFileSystem() {
-    setConf(new Configuration());
-  }
+    public RawInMemoryFileSystem() {
+      setConf(new Configuration());
+    }
 
-  public RawInMemoryFileSystem(URI uri, Configuration conf) {
-    initialize(uri, conf);
-  }
+    public RawInMemoryFileSystem(URI uri, Configuration conf) {
+      initialize(uri, conf);
+    }
   
-  //inherit javadoc
-  public void initialize(URI uri, Configuration conf) {
-    setConf(conf);
-    int size = Integer.parseInt(conf.get("fs.inmemory.size.mb", "100"));
-    this.fsSize = size * 1024 * 1024;
-    this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
-    this.staticWorkingDir = new Path(this.uri.getPath());
-    LOG.info("Initialized InMemoryFileSystem: " + uri.toString() + 
-             " of size (in bytes): " + fsSize);
-  }
+    //inherit javadoc
+    public void initialize(URI uri, Configuration conf) {
+      setConf(conf);
+      int size = Integer.parseInt(conf.get("fs.inmemory.size.mb", "100"));
+      this.fsSize = size * 1024 * 1024;
+      this.uri = URI.create(uri.getScheme() + "://" + uri.getAuthority());
+      this.staticWorkingDir = new Path(this.uri.getPath());
+      LOG.info("Initialized InMemoryFileSystem: " + uri.toString() + 
+               " of size (in bytes): " + fsSize);
+    }
 
-  //inherit javadoc
-  public URI getUri() {
-    return uri;
-  }
+    //inherit javadoc
+    public URI getUri() {
+      return uri;
+    }
 
-  /** @deprecated */
-  public String getName() {
-    return uri.toString();
-  }
+    /** @deprecated */
+    public String getName() {
+      return uri.toString();
+    }
 
-  /**
-   * Return 1x1 'inmemory' cell if the file exists.
-   * Return null otherwise.
-   */
-  public String[][] getFileCacheHints(Path f, long start, long len)
+    /**
+     * Return 1x1 'inmemory' cell if the file exists.
+     * Return null otherwise.
+     */
+    public String[][] getFileCacheHints(Path f, long start, long len)
       throws IOException {
-    if (! exists(f)) {
-      return null;
-    } else {
-      return new String[][] {{"inmemory"}};
+      if (! exists(f)) {
+        return null;
+      } else {
+        return new String[][] {{"inmemory"}};
+      }
     }
-  }
 
-  private class InMemoryInputStream extends FSInputStream {
-    private DataInputBuffer din = new DataInputBuffer();
-    private FileAttributes fAttr;
+    private class InMemoryInputStream extends FSInputStream {
+      private DataInputBuffer din = new DataInputBuffer();
+      private FileAttributes fAttr;
     
-    public InMemoryInputStream(Path f) throws IOException {
-      synchronized (RawInMemoryFileSystem.this) {
-        fAttr = pathToFileAttribs.get(getPath(f));
-        if (fAttr == null) { 
-          throw new FileNotFoundException("File " + f + " does not exist");
-        }                            
-        din.reset(fAttr.data, 0, fAttr.size);
+      public InMemoryInputStream(Path f) throws IOException {
+        synchronized (RawInMemoryFileSystem.this) {
+          fAttr = pathToFileAttribs.get(getPath(f));
+          if (fAttr == null) { 
+            throw new FileNotFoundException("File " + f + " does not exist");
+          }                            
+          din.reset(fAttr.data, 0, fAttr.size);
+        }
       }
-    }
     
-    public long getPos() throws IOException {
-      return din.getPosition();
-    }
+      public long getPos() throws IOException {
+        return din.getPosition();
+      }
     
-    public void seek(long pos) throws IOException {
-      if ((int)pos > fAttr.size)
-        throw new IOException("Cannot seek after EOF");
-      din.reset(fAttr.data, (int)pos, fAttr.size - (int)pos);
-    }
+      public void seek(long pos) throws IOException {
+        if ((int)pos > fAttr.size)
+          throw new IOException("Cannot seek after EOF");
+        din.reset(fAttr.data, (int)pos, fAttr.size - (int)pos);
+      }
     
-    public boolean seekToNewSource(long targetPos) throws IOException {
-      return false;
-    }
+      public boolean seekToNewSource(long targetPos) throws IOException {
+        return false;
+      }
 
-    public int available() throws IOException {
-      return din.available(); 
-    }
-    public boolean markSupport() { return false; }
+      public int available() throws IOException {
+        return din.available(); 
+      }
+      public boolean markSupport() { return false; }
 
-    public int read() throws IOException {
-      return din.read();
-    }
+      public int read() throws IOException {
+        return din.read();
+      }
 
-    public int read(byte[] b, int off, int len) throws IOException {
-      return din.read(b, off, len);
-    }
+      public int read(byte[] b, int off, int len) throws IOException {
+        return din.read(b, off, len);
+      }
     
-    public long skip(long n) throws IOException { return din.skip(n); }
-  }
-
-  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    return new FSDataInputStream(new InMemoryInputStream(f), bufferSize);
-  }
+      public long skip(long n) throws IOException { return din.skip(n); }
+    }
 
-  private class InMemoryOutputStream extends OutputStream {
-    private int count;
-    private FileAttributes fAttr;
-    private Path f;
-    
-    public InMemoryOutputStream(Path f, FileAttributes fAttr) 
-    throws IOException {
-      this.fAttr = fAttr;
-      this.f = f;
+    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+      return new FSDataInputStream(new InMemoryInputStream(f), bufferSize);
     }
+
+    private class InMemoryOutputStream extends OutputStream {
+      private int count;
+      private FileAttributes fAttr;
+      private Path f;
     
-    public long getPos() throws IOException {
-      return count;
-    }
+      public InMemoryOutputStream(Path f, FileAttributes fAttr) 
+        throws IOException {
+        this.fAttr = fAttr;
+        this.f = f;
+      }
     
-    public void close() throws IOException {
-      synchronized (RawInMemoryFileSystem.this) {
-        pathToFileAttribs.put(getPath(f), fAttr);
+      public long getPos() throws IOException {
+        return count;
       }
-    }
     
-    public void write(byte[] b, int off, int len) throws IOException {
-      if ((off < 0) || (off > b.length) || (len < 0) ||
-          ((off + len) > b.length) || ((off + len) < 0)) {
-        throw new IndexOutOfBoundsException();
-      } else if (len == 0) {
-        return;
+      public void close() throws IOException {
+        synchronized (RawInMemoryFileSystem.this) {
+          pathToFileAttribs.put(getPath(f), fAttr);
+        }
       }
-      int newcount = count + len;
-      if (newcount > fAttr.size) {
-        throw new IOException("Insufficient space");
+    
+      public void write(byte[] b, int off, int len) throws IOException {
+        if ((off < 0) || (off > b.length) || (len < 0) ||
+            ((off + len) > b.length) || ((off + len) < 0)) {
+          throw new IndexOutOfBoundsException();
+        } else if (len == 0) {
+          return;
+        }
+        int newcount = count + len;
+        if (newcount > fAttr.size) {
+          throw new IOException("Insufficient space");
+        }
+        System.arraycopy(b, off, fAttr.data, count, len);
+        count = newcount;
       }
-      System.arraycopy(b, off, fAttr.data, count, len);
-      count = newcount;
-    }
     
-    public void write(int b) throws IOException {
-      int newcount = count + 1;
-      if (newcount > fAttr.size) {
-        throw new IOException("Insufficient space");
+      public void write(int b) throws IOException {
+        int newcount = count + 1;
+        if (newcount > fAttr.size) {
+          throw new IOException("Insufficient space");
+        }
+        fAttr.data[count] = (byte)b;
+        count = newcount;
       }
-      fAttr.data[count] = (byte)b;
-      count = newcount;
     }
-  }
   
-  public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-      short replication, long blockSize, Progressable progress)
+    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+                                     short replication, long blockSize, Progressable progress)
       throws IOException {
-    synchronized (this) {
-      if (exists(f) && ! overwrite) {
-        throw new IOException("File already exists:"+f);
+      synchronized (this) {
+        if (exists(f) && ! overwrite) {
+          throw new IOException("File already exists:"+f);
+        }
+        FileAttributes fAttr =(FileAttributes) tempFileAttribs.remove(getPath(f));
+        if (fAttr != null)
+          return create(f, fAttr);
+        return null;
       }
-      FileAttributes fAttr =(FileAttributes) tempFileAttribs.remove(getPath(f));
-      if (fAttr != null)
-        return create(f, fAttr);
-      return null;
     }
-  }
   
-  public FSDataOutputStream create(Path f, FileAttributes fAttr)
+    public FSDataOutputStream create(Path f, FileAttributes fAttr)
       throws IOException {
-    // the path is not added into the filesystem (in the pathToFileAttribs
-    // map) until close is called on the outputstream that this method is
-    // going to return
-    // Create an output stream out of data byte array
-    return new FSDataOutputStream(new InMemoryOutputStream(f, fAttr),
-        getConf());
-  }
+      // the path is not added into the filesystem (in the pathToFileAttribs
+      // map) until close is called on the outputstream that this method is
+      // going to return
+      // Create an output stream out of data byte array
+      return new FSDataOutputStream(new InMemoryOutputStream(f, fAttr),
+                                    getConf());
+    }
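A hypothetical helper (illustrative only, not part of this commit) that could sit inside RawInMemoryFileSystem, sketching the lifecycle the comments above describe: reserveSpace() parks an entry in tempFileAttribs, create() consumes it, and closing the returned stream finally publishes the path into pathToFileAttribs.

// Hypothetical method, for illustration only; it uses only methods already
// declared in this class (reserveSpace, create, getDefaultBlockSize, exists).
private boolean writeWholeFile(Path f, byte[] payload) throws IOException {
  if (!reserveSpace(f, payload.length)) {
    return false;                          // not enough room in the fs budget
  }
  FSDataOutputStream out = create(f, true, 4096, (short) 1,
                                  getDefaultBlockSize(), null);
  try {
    out.write(payload);                    // fails with "Insufficient space" past the reservation
  } finally {
    out.close();                           // only now does exists(f) report true
  }
  return true;
}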
 
-  public void close() throws IOException {
-    super.close();
-    synchronized (this) {
-      if (pathToFileAttribs != null) { 
-        pathToFileAttribs.clear();
-      }
-      pathToFileAttribs = null;
-      if (tempFileAttribs != null) {
-        tempFileAttribs.clear();
+    public void close() throws IOException {
+      super.close();
+      synchronized (this) {
+        if (pathToFileAttribs != null) { 
+          pathToFileAttribs.clear();
+        }
+        pathToFileAttribs = null;
+        if (tempFileAttribs != null) {
+          tempFileAttribs.clear();
+        }
+        tempFileAttribs = null;
       }
-      tempFileAttribs = null;
     }
-  }
 
-  /**
-   * Replication is not supported for the inmemory file system.
-   */
-  public short getReplication(Path src) throws IOException {
-    return 1;
-  }
+    /**
+     * Replication is not supported for the inmemory file system.
+     */
+    public short getReplication(Path src) throws IOException {
+      return 1;
+    }
 
-  public boolean setReplication(Path src, short replication)
+    public boolean setReplication(Path src, short replication)
       throws IOException {
-    return true;
-  }
-
-  public boolean rename(Path src, Path dst) throws IOException {
-    synchronized (this) {
-      if (exists(dst)) {
-        throw new IOException ("Path " + dst + " already exists");
-      }
-      FileAttributes fAttr = pathToFileAttribs.remove(getPath(src));
-      if (fAttr == null) return false;
-      pathToFileAttribs.put(getPath(dst), fAttr);
       return true;
     }
-  }
 
-  public boolean delete(Path f) throws IOException {
-    synchronized (this) {
-      FileAttributes fAttr = pathToFileAttribs.remove(getPath(f));
-      if (fAttr != null) {
-        fAttr.data = null;
-        totalUsed -= fAttr.size;
+    public boolean rename(Path src, Path dst) throws IOException {
+      synchronized (this) {
+        if (exists(dst)) {
+          throw new IOException ("Path " + dst + " already exists");
+        }
+        FileAttributes fAttr = pathToFileAttribs.remove(getPath(src));
+        if (fAttr == null) return false;
+        pathToFileAttribs.put(getPath(dst), fAttr);
         return true;
       }
-      return false;
     }
-  }
 
-  public boolean exists(Path f) throws IOException {
-    synchronized (this) {
-      return pathToFileAttribs.containsKey(getPath(f));
+    public boolean delete(Path f) throws IOException {
+      synchronized (this) {
+        FileAttributes fAttr = pathToFileAttribs.remove(getPath(f));
+        if (fAttr != null) {
+          fAttr.data = null;
+          totalUsed -= fAttr.size;
+          return true;
+        }
+        return false;
+      }
+    }
+
+    public boolean exists(Path f) throws IOException {
+      synchronized (this) {
+        return pathToFileAttribs.containsKey(getPath(f));
+      }
     }
-  }
   
-  /**
-   * Directory operations are not supported
-   */
-  public boolean isDirectory(Path f) throws IOException {
-    return !isFile(f);
-  }
+    /**
+     * Directory operations are not supported
+     */
+    public boolean isDirectory(Path f) throws IOException {
+      return !isFile(f);
+    }
 
-  public boolean isFile(Path f) throws IOException {
-    return exists(f);
-  }
+    public boolean isFile(Path f) throws IOException {
+      return exists(f);
+    }
 
-  public long getLength(Path f) throws IOException {
-    synchronized (this) {
-      return pathToFileAttribs.get(getPath(f)).size;
+    public long getLength(Path f) throws IOException {
+      synchronized (this) {
+        return pathToFileAttribs.get(getPath(f)).size;
+      }
     }
-  }
   
-  /**
-   * Directory operations are not supported
-   */
-  public Path[] listPaths(Path f) throws IOException {
-    return null;
-  }
+    /**
+     * Directory operations are not supported
+     */
+    public Path[] listPaths(Path f) throws IOException {
+      return null;
+    }
 
-  public void setWorkingDirectory(Path new_dir) {
-    staticWorkingDir = new_dir;
-  }
+    public void setWorkingDirectory(Path new_dir) {
+      staticWorkingDir = new_dir;
+    }
   
-  public Path getWorkingDirectory() {
-    return staticWorkingDir;
-  }
+    public Path getWorkingDirectory() {
+      return staticWorkingDir;
+    }
 
-  public boolean mkdirs(Path f) throws IOException {
-    return true;
-  }
+    public boolean mkdirs(Path f) throws IOException {
+      return true;
+    }
   
-  /** lock operations are not supported */
-  public void lock(Path f, boolean shared) throws IOException {}
-  public void release(Path f) throws IOException {}
+    /** lock operations are not supported */
+    public void lock(Path f, boolean shared) throws IOException {}
+    public void release(Path f) throws IOException {}
   
-  /** copy/move operations are not supported */
-  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    /** copy/move operations are not supported */
+    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
       throws IOException {
-  }
+    }
 
-  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
       throws IOException {
-  }
+    }
 
-  public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+    public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
       throws IOException {
-    return fsOutputFile;
-  }
+      return fsOutputFile;
+    }
 
-  public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+    public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
       throws IOException {
-  }
+    }
 
-  public long getBlockSize(Path f) throws IOException {
-    return getDefaultBlockSize();
-  }
+    public long getBlockSize(Path f) throws IOException {
+      return getDefaultBlockSize();
+    }
 
-  public long getDefaultBlockSize() {
-    return 32 * 1024; //some random large number. can be anything actually
-  }
+    public long getDefaultBlockSize() {
+      return 32 * 1024; //some random large number. can be anything actually
+    }
 
-  public short getDefaultReplication() {
-    return 1;
-  }
+    public short getDefaultReplication() {
+      return 1;
+    }
   
-  /** Some APIs exclusively for InMemoryFileSystem */
+    /** Some APIs exclusively for InMemoryFileSystem */
   
-  /** Register a path with its size. */
-  public boolean reserveSpace(Path f, int size) {
-    synchronized (this) {
-      if (!canFitInMemory(size))
-        return false;
-      FileAttributes fileAttr;
-      try {
-        fileAttr = new FileAttributes(size);
-      } catch (OutOfMemoryError o) {
-        return false;
+    /** Register a path with its size. */
+    public boolean reserveSpace(Path f, int size) {
+      synchronized (this) {
+        if (!canFitInMemory(size))
+          return false;
+        FileAttributes fileAttr;
+        try {
+          fileAttr = new FileAttributes(size);
+        } catch (OutOfMemoryError o) {
+          return false;
+        }
+        totalUsed += size;
+        tempFileAttribs.put(getPath(f), fileAttr);
+        return true;
       }
-      totalUsed += size;
-      tempFileAttribs.put(getPath(f), fileAttr);
-      return true;
     }
-  }
   
-  /** This API getClosedFiles could have been implemented over listPathsRaw
-   * but it is an overhead to maintain directory structures for this impl of
-   * the in-memory fs.
-   */
-  public Path[] getFiles(PathFilter filter) {
-    synchronized (this) {
-      List<String> closedFilesList = new ArrayList<String>();
-      synchronized (pathToFileAttribs) {
-        Set paths = pathToFileAttribs.keySet();
-        if (paths == null || paths.isEmpty()) {
-          return new Path[0];
-        }
-        Iterator iter = paths.iterator();
-        while (iter.hasNext()) {
-          String f = (String)iter.next();
-          if (filter.accept(new Path(f))) {
-            closedFilesList.add(f);
+    /** This API getClosedFiles could have been implemented over listPathsRaw
+     * but it is an overhead to maintain directory structures for this impl of
+     * the in-memory fs.
+     */
+    public Path[] getFiles(PathFilter filter) {
+      synchronized (this) {
+        List<String> closedFilesList = new ArrayList<String>();
+        synchronized (pathToFileAttribs) {
+          Set paths = pathToFileAttribs.keySet();
+          if (paths == null || paths.isEmpty()) {
+            return new Path[0];
+          }
+          Iterator iter = paths.iterator();
+          while (iter.hasNext()) {
+            String f = (String)iter.next();
+            if (filter.accept(new Path(f))) {
+              closedFilesList.add(f);
+            }
           }
         }
+        String [] names = 
+          closedFilesList.toArray(new String[closedFilesList.size()]);
+        Path [] results = new Path[names.length];
+        for (int i = 0; i < names.length; i++) {
+          results[i] = new Path(names[i]);
+        }
+        return results;
       }
-      String [] names = 
-        closedFilesList.toArray(new String[closedFilesList.size()]);
-      Path [] results = new Path[names.length];
-      for (int i = 0; i < names.length; i++) {
-        results[i] = new Path(names[i]);
-      }
-      return results;
     }
-  }
   
-  public int getNumFiles(PathFilter filter) {
-    return getFiles(filter).length;
-  }
+    public int getNumFiles(PathFilter filter) {
+      return getFiles(filter).length;
+    }
 
-  public int getFSSize() {
-    return fsSize;
-  }
+    public int getFSSize() {
+      return fsSize;
+    }
   
-  public float getPercentUsed() {
-    if (fsSize > 0)
-      return (float)totalUsed/fsSize;
-    else return 0.1f;
-  }
+    public float getPercentUsed() {
+      if (fsSize > 0)
+        return (float)totalUsed/fsSize;
+      else return 0.1f;
+    }
  
-  private boolean canFitInMemory(int size) {
-    if (size + totalUsed < fsSize)
-      return true;
-    return false;
-  }
+    private boolean canFitInMemory(int size) {
+      if (size + totalUsed < fsSize)
+        return true;
+      return false;
+    }
   
-  private String getPath(Path f) {
-    return f.toUri().getPath();
-  }
+    private String getPath(Path f) {
+      return f.toUri().getPath();
+    }
   
-  private static class FileAttributes {
-    private byte[] data;
-    private int size;
+    private static class FileAttributes {
+      private byte[] data;
+      private int size;
     
-    public FileAttributes(int size) {
-      this.size = size;
-      this.data = new byte[size];
+      public FileAttributes(int size) {
+        this.size = size;
+        this.data = new byte[size];
+      }
     }
-  }
   }
     
-    public InMemoryFileSystem() {
-        super(new RawInMemoryFileSystem());
-    }
+  public InMemoryFileSystem() {
+    super(new RawInMemoryFileSystem());
+  }
     
-    public InMemoryFileSystem(URI uri, Configuration conf) {
-        super(new RawInMemoryFileSystem(uri, conf));
-    }
+  public InMemoryFileSystem(URI uri, Configuration conf) {
+    super(new RawInMemoryFileSystem(uri, conf));
+  }
     
-    /** copy/move operations are not supported */
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  /** copy/move operations are not supported */
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {}
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {}
     
-    public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
-        return fsOutputFile;
-    }
+    return fsOutputFile;
+  }
     
-    public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
-    }
+  }
     
-    /**
-     * Register a file with its size. This will also register a checksum for the
-     * file that the user is trying to create. This is required since none of
-     * the FileSystem APIs accept the size of the file as argument. But since it
-     * is required for us to apriori know the size of the file we are going to
-     * create, the user must call this method for each file he wants to create
-     * and reserve memory for that file. We either succeed in reserving memory
-     * for both the main file and the checksum file and return true, or return
-     * false.
-     */
-    public boolean reserveSpaceWithCheckSum(Path f, int size) {
-        // get the size of the checksum file (we know it is going to be 'int'
-        // since this is an inmem fs with file sizes that will fit in 4 bytes)
-        long checksumSize = getChecksumFileLength(f, size);
-        RawInMemoryFileSystem mfs = (RawInMemoryFileSystem)getRawFileSystem();
-        synchronized(mfs) {
-            return mfs.reserveSpace(f, size) && 
-            mfs.reserveSpace(getChecksumFile(f),
-                    (int)getChecksumFileLength(f, size));
-        }
-    }
-    public Path[] getFiles(PathFilter filter) {
-        return ((RawInMemoryFileSystem)getRawFileSystem()).getFiles(filter);
+  /**
+   * Register a file with its size. This will also register a checksum for the
+   * file that the user is trying to create. This is required since none of
+   * the FileSystem APIs accept the size of the file as argument. But since it
+   * is required for us to apriori know the size of the file we are going to
+   * create, the user must call this method for each file he wants to create
+   * and reserve memory for that file. We either succeed in reserving memory
+   * for both the main file and the checksum file and return true, or return
+   * false.
+   */
+  public boolean reserveSpaceWithCheckSum(Path f, int size) {
+    // get the size of the checksum file (we know it is going to be 'int'
+    // since this is an inmem fs with file sizes that will fit in 4 bytes)
+    long checksumSize = getChecksumFileLength(f, size);
+    RawInMemoryFileSystem mfs = (RawInMemoryFileSystem)getRawFileSystem();
+    synchronized(mfs) {
+      return mfs.reserveSpace(f, size) && 
+        mfs.reserveSpace(getChecksumFile(f),
+                         (int)getChecksumFileLength(f, size));
     }
+  }
+  public Path[] getFiles(PathFilter filter) {
+    return ((RawInMemoryFileSystem)getRawFileSystem()).getFiles(filter);
+  }
     
-    public int getNumFiles(PathFilter filter) {
-      return ((RawInMemoryFileSystem)getRawFileSystem()).getNumFiles(filter);
-    }
+  public int getNumFiles(PathFilter filter) {
+    return ((RawInMemoryFileSystem)getRawFileSystem()).getNumFiles(filter);
+  }
 
-    public int getFSSize() {
-        return ((RawInMemoryFileSystem)getRawFileSystem()).getFSSize();
-    }
+  public int getFSSize() {
+    return ((RawInMemoryFileSystem)getRawFileSystem()).getFSSize();
+  }
     
-    public float getPercentUsed() {
-        return ((RawInMemoryFileSystem)getRawFileSystem()).getPercentUsed();
-    }
+  public float getPercentUsed() {
+    return ((RawInMemoryFileSystem)getRawFileSystem()).getPercentUsed();
+  }
 }
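
The InMemoryFileSystem hunk above is a pure re-indent, but its javadoc spells out a reservation protocol: callers must reserve memory for a file and its checksum before creating it. A minimal usage sketch follows, assuming the org.apache.hadoop.fs package location and only the constructors and methods visible in this diff; the "ramfs" URI scheme is an assumption that the hunk does not confirm.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.InMemoryFileSystem;
import org.apache.hadoop.fs.Path;

public class InMemFsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The "ramfs" scheme is assumed here; it is not visible in this hunk.
    InMemoryFileSystem fs = new InMemoryFileSystem(URI.create("ramfs://sketch"), conf);

    Path p = new Path("/spill/part-0");
    // Reserve memory for the file and its checksum up front, as the javadoc requires;
    // the call fails (returns false) rather than throwing when the fs is full.
    if (fs.reserveSpaceWithCheckSum(p, 16 * 1024)) {
      System.out.println("reserved; usage = " + fs.getPercentUsed());
    } else {
      System.out.println("not enough room in the in-memory fs");
    }
  }
}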

+ 57 - 57
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -28,78 +28,78 @@ import java.util.*;
  * @author Mike Cafarella
  *****************************************************************/
 public class LocalFileSystem extends ChecksumFileSystem {
-    static final URI NAME = URI.create("file:///");
+  static final URI NAME = URI.create("file:///");
 
-    public LocalFileSystem() {
-        super(new RawLocalFileSystem());
-    }
+  public LocalFileSystem() {
+    super(new RawLocalFileSystem());
+  }
     
-    public LocalFileSystem( FileSystem rawLocalFileSystem ) {
-        super(rawLocalFileSystem);
-    }
+  public LocalFileSystem( FileSystem rawLocalFileSystem ) {
+    super(rawLocalFileSystem);
+  }
     
-    /** Convert a path to a File. */
-    public File pathToFile(Path path) {
-      return ((RawLocalFileSystem)fs).pathToFile(path);
-    }
+  /** Convert a path to a File. */
+  public File pathToFile(Path path) {
+    return ((RawLocalFileSystem)fs).pathToFile(path);
+  }
 
-    @Override
+  @Override
     public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
-      FileUtil.copy(this, src, this, dst, delSrc, getConf());
-    }
+    FileUtil.copy(this, src, this, dst, delSrc, getConf());
+  }
 
-    @Override
+  @Override
     public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
-      FileUtil.copy(this, src, this, dst, delSrc, getConf());
-    }
+    FileUtil.copy(this, src, this, dst, delSrc, getConf());
+  }
 
-    /**
-     * Moves files to a bad file directory on the same device, so that their
-     * storage will not be reused.
-     */
-    public boolean reportChecksumFailure(Path p, FSDataInputStream in,
-                                      long inPos,
-                                      FSDataInputStream sums, long sumsPos) {
-      try {
-        // canonicalize f
-        File f = ((RawLocalFileSystem)fs).pathToFile(p).getCanonicalFile();
+  /**
+   * Moves files to a bad file directory on the same device, so that their
+   * storage will not be reused.
+   */
+  public boolean reportChecksumFailure(Path p, FSDataInputStream in,
+                                       long inPos,
+                                       FSDataInputStream sums, long sumsPos) {
+    try {
+      // canonicalize f
+      File f = ((RawLocalFileSystem)fs).pathToFile(p).getCanonicalFile();
       
-        // find highest writable parent dir of f on the same device
-        String device = new DF(f, getConf()).getMount();
-        File parent = f.getParentFile();
-        File dir = null;
-        while (parent!=null && parent.canWrite() && parent.toString().startsWith(device)) {
-          dir = parent;
-          parent = parent.getParentFile();
-        }
+      // find highest writable parent dir of f on the same device
+      String device = new DF(f, getConf()).getMount();
+      File parent = f.getParentFile();
+      File dir = null;
+      while (parent!=null && parent.canWrite() && parent.toString().startsWith(device)) {
+        dir = parent;
+        parent = parent.getParentFile();
+      }
 
-        if (dir==null) {
-          throw new IOException(
-              "not able to find the highest writable parent dir");
-        }
+      if (dir==null) {
+        throw new IOException(
+                              "not able to find the highest writable parent dir");
+      }
         
-        // move the file there
-        File badDir = new File(dir, "bad_files");
-        if (!badDir.mkdirs()) {
-          if (!badDir.isDirectory()) {
-            throw new IOException("Mkdirs failed to create " + badDir.toString());
-          }
+      // move the file there
+      File badDir = new File(dir, "bad_files");
+      if (!badDir.mkdirs()) {
+        if (!badDir.isDirectory()) {
+          throw new IOException("Mkdirs failed to create " + badDir.toString());
         }
-        String suffix = "." + new Random().nextInt();
-        File badFile = new File(badDir,f.getName()+suffix);
-        LOG.warn("Moving bad file " + f + " to " + badFile);
-        in.close();                               // close it first
-        f.renameTo(badFile);                      // rename it
+      }
+      String suffix = "." + new Random().nextInt();
+      File badFile = new File(badDir,f.getName()+suffix);
+      LOG.warn("Moving bad file " + f + " to " + badFile);
+      in.close();                               // close it first
+      f.renameTo(badFile);                      // rename it
 
-        // move checksum file too
-        File checkFile = ((RawLocalFileSystem)fs).pathToFile(getChecksumFile(p));
-        checkFile.renameTo(new File(badDir, checkFile.getName()+suffix));
+      // move checksum file too
+      File checkFile = ((RawLocalFileSystem)fs).pathToFile(getChecksumFile(p));
+      checkFile.renameTo(new File(badDir, checkFile.getName()+suffix));
 
-      } catch (IOException e) {
-        LOG.warn("Error moving bad file " + p + ": " + e);
-      }
-      return false;
+    } catch (IOException e) {
+      LOG.warn("Error moving bad file " + p + ": " + e);
     }
+    return false;
+  }
 }
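
Apart from whitespace, the LocalFileSystem hunk shows two behaviours worth keeping in mind: local copies are same-filesystem FileUtil.copy calls, and a checksum mismatch is handled by reportChecksumFailure, which moves the bad file and its checksum sibling into a bad_files directory on the same device instead of deleting them. A rough sketch follows, assuming that direct construction plus setConf is enough for a demo; the usual factory would be FileSystem.getLocal(conf), which is not part of this hunk.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.LocalFileSystem;
import org.apache.hadoop.fs.Path;

public class LocalFsChecksumSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Constructing LocalFileSystem directly mirrors the constructors in the hunk,
    // but skips the normal initialization path, so treat this purely as an illustration.
    LocalFileSystem local = new LocalFileSystem();
    local.setConf(conf);

    Path src = new Path("/tmp/in.txt");
    Path dst = new Path("/tmp/out.txt");
    // Both "sides" are local, so copyFromLocalFile is just a checksummed
    // same-filesystem copy (the FileUtil.copy call shown above).
    local.copyFromLocalFile(false, src, dst);

    // Reads go through ChecksumFileSystem; a verification failure would invoke
    // reportChecksumFailure, which parks the corrupt file under bad_files.
    FSDataInputStream in = local.open(dst);
    System.out.println("first byte: " + in.read());
    in.close();
  }
}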

+ 2 - 2
src/java/org/apache/hadoop/fs/PositionedReadable.java

@@ -28,7 +28,7 @@ public interface PositionedReadable {
    * change the current offset of a file, and is thread-safe.
    */
   public int read(long position, byte[] buffer, int offset, int length)
-  throws IOException;
+    throws IOException;
   
   /**
    * Read the specified number of bytes, from a given
@@ -36,7 +36,7 @@ public interface PositionedReadable {
    * change the current offset of a file, and is thread-safe.
    */
   public void readFully(long position, byte[] buffer, int offset, int length)
-  throws IOException;
+    throws IOException;
   
   /**
    * Read number of bytes equalt to the length of the buffer, from a given
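
The PositionedReadable change is only a throws-clause re-wrap; the interface itself promises positional reads that neither move the stream's current offset nor require external locking. A small sketch of that contract, assuming FSDataInputStream implements PositionedReadable and that the FileSystem.get/open factory methods are available (neither assumption is shown in this hunk):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionedReadSketch {
  // Read len bytes starting at pos without disturbing the stream's current
  // offset, using the PositionedReadable contract from the hunk above.
  static byte[] preadFully(FSDataInputStream in, long pos, int len) throws Exception {
    byte[] buf = new byte[len];
    in.readFully(pos, buf, 0, len);   // positional read; stream offset is unchanged
    return buf;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);        // assumed factory; not part of this hunk
    FSDataInputStream in = fs.open(new Path(args[0]));
    byte[] header = preadFully(in, 0L, 16);
    System.out.println("read " + header.length + " header bytes");
    in.close();
  }
}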

+ 16 - 16
src/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -123,7 +123,7 @@ public class RawLocalFileSystem extends FileSystem {
     }
     
     public int read(long position, byte[] b, int off, int len)
-    throws IOException {
+      throws IOException {
       ByteBuffer bb = ByteBuffer.wrap(b, off, len);
       try {
         return fis.getChannel().read(bb, position);
@@ -179,8 +179,8 @@ public class RawLocalFileSystem extends FileSystem {
   }
   
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-      short replication, long blockSize, Progressable progress)
-  throws IOException {
+                                   short replication, long blockSize, Progressable progress)
+    throws IOException {
     if (exists(f) && ! overwrite) {
       throw new IOException("File already exists:"+f);
     }
@@ -200,8 +200,8 @@ public class RawLocalFileSystem extends FileSystem {
   
   /** Set the replication of the given file */
   public boolean setReplication( Path src,
-      short replication
-  ) throws IOException {
+                                 short replication
+                                 ) throws IOException {
     return true;
   }
   
@@ -261,24 +261,24 @@ public class RawLocalFileSystem extends FileSystem {
     Path parent = f.getParent();
     File p2f = pathToFile(f);
     return (parent == null || mkdirs(parent)) &&
-    (p2f.mkdir() || p2f.isDirectory());
+      (p2f.mkdir() || p2f.isDirectory());
   }
   
   /**
    * Set the working directory to the given directory.
    */
   @Override
-  public void setWorkingDirectory(Path newDir) {
+    public void setWorkingDirectory(Path newDir) {
     workingDir = newDir;
   }
   
   @Override
-  public Path getWorkingDirectory() {
+    public Path getWorkingDirectory() {
     return workingDir;
   }
   
   /** @deprecated */ @Deprecated
-  public void lock(Path p, boolean shared) throws IOException {
+    public void lock(Path p, boolean shared) throws IOException {
     File f = pathToFile(p);
     f.createNewFile();
     
@@ -301,7 +301,7 @@ public class RawLocalFileSystem extends FileSystem {
   }
   
   /** @deprecated */ @Deprecated
-  public void release(Path p) throws IOException {
+    public void release(Path p) throws IOException {
     File f = pathToFile(p);
     
     FileLock lockObj;
@@ -335,26 +335,26 @@ public class RawLocalFileSystem extends FileSystem {
   }
   
   @Override
-  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
-  throws IOException {
+    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    throws IOException {
     FileUtil.copy(this, src, this, dst, delSrc, getConf());
   }
   
   @Override
-  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
-  throws IOException {
+    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+    throws IOException {
     FileUtil.copy(this, src, this, dst, delSrc, getConf());
   }
   
   // We can write output directly to the final location
   public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-  throws IOException {
+    throws IOException {
     return fsOutputFile;
   }
   
   // It's in the right place - nothing to do.
   public void completeLocalOutput(Path fsWorkingFile, Path tmpLocalFile)
-  throws IOException {
+    throws IOException {
   }
   
   public void close() throws IOException {
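
The RawLocalFileSystem hunks re-wrap the long create(...) signature and the deprecated lock/release methods without changing semantics. The sketch below exercises that create signature; it assumes a no-arg constructor plus setConf suffices for a quick demo and that a null Progressable is accepted, both of which go beyond what the hunk itself shows.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class RawLocalCreateSketch {
  public static void main(String[] args) throws Exception {
    RawLocalFileSystem raw = new RawLocalFileSystem();
    raw.setConf(new Configuration());

    Path p = new Path("/tmp/raw-local-demo.txt");
    // The long create(...) signature re-wrapped above: overwrite flag, buffer size,
    // replication (ignored locally), block size, and a Progressable (null assumed OK).
    FSDataOutputStream out = raw.create(p, true, 4096, (short) 1, 32 * 1024 * 1024L, null);
    out.write("hello".getBytes());
    out.close();

    // startLocalOutput/completeLocalOutput are no-ops for the local fs:
    // output is already written in its final location.
    System.out.println("exists: " + raw.exists(p));
  }
}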

+ 2 - 2
src/java/org/apache/hadoop/io/ArrayWritable.java

@@ -48,8 +48,8 @@ public class ArrayWritable implements Writable {
 
   public void setValueClass(Class valueClass) {
     if (valueClass != this.valueClass) {
-        this.valueClass = valueClass;
-        this.values = null;
+      this.valueClass = valueClass;
+      this.values = null;
     }
   }
   

+ 76 - 76
src/java/org/apache/hadoop/io/BooleanWritable.java

@@ -24,86 +24,86 @@ import java.io.*;
  * A WritableComparable for booleans. 
  */
 public class BooleanWritable implements WritableComparable {
-    private boolean value;
-
-    /** 
-     */
-    public BooleanWritable() {};
-
-    /** 
-     */
-    public BooleanWritable(boolean value) {
-        set(value);
-    }
-
-    /** 
-     * Set the value of the BooleanWritable
-     */    
-    public void set(boolean value) {
-        this.value = value;
-    }
-
-    /**
-     * Returns the value of the BooleanWritable
-     */
-    public boolean get() {
-        return value;
-    }
-
-    /**
-     */
-    public void readFields(DataInput in) throws IOException {
-        value = in.readBoolean();
-    }
-
-    /**
-     */
-    public void write(DataOutput out) throws IOException {
-        out.writeBoolean(value);
+  private boolean value;
+
+  /** 
+   */
+  public BooleanWritable() {};
+
+  /** 
+   */
+  public BooleanWritable(boolean value) {
+    set(value);
+  }
+
+  /** 
+   * Set the value of the BooleanWritable
+   */    
+  public void set(boolean value) {
+    this.value = value;
+  }
+
+  /**
+   * Returns the value of the BooleanWritable
+   */
+  public boolean get() {
+    return value;
+  }
+
+  /**
+   */
+  public void readFields(DataInput in) throws IOException {
+    value = in.readBoolean();
+  }
+
+  /**
+   */
+  public void write(DataOutput out) throws IOException {
+    out.writeBoolean(value);
+  }
+
+  /**
+   */
+  public boolean equals(Object o) {
+    if (!(o instanceof BooleanWritable)) {
+      return false;
     }
-
-    /**
-     */
-    public boolean equals(Object o) {
-        if (!(o instanceof BooleanWritable)) {
-            return false;
-        }
-        BooleanWritable other = (BooleanWritable) o;
-        return this.value == other.value;
+    BooleanWritable other = (BooleanWritable) o;
+    return this.value == other.value;
+  }
+
+  public int hashCode() {
+    return value ? 0 : 1;
+  }
+
+
+
+  /**
+   */
+  public int compareTo(Object o) {
+    boolean a = this.value;
+    boolean b = ((BooleanWritable) o).value;
+    return ((a == b) ? 0 : (a == false) ? -1 : 1);
+  }
+
+  /** 
+   * A Comparator optimized for BooleanWritable. 
+   */ 
+  public static class Comparator extends WritableComparator {
+    public Comparator() {
+      super(BooleanWritable.class);
     }
 
-    public int hashCode() {
-      return value ? 0 : 1;
+    public int compare(byte[] b1, int s1, int l1,
+                       byte[] b2, int s2, int l2) {
+      boolean a = (readInt(b1, s1) == 1) ? true : false;
+      boolean b = (readInt(b2, s2) == 1) ? true : false;
+      return ((a == b) ? 0 : (a == false) ? -1 : 1);
     }
+  }
 
 
-
-    /**
-     */
-    public int compareTo(Object o) {
-        boolean a = this.value;
-        boolean b = ((BooleanWritable) o).value;
-        return ((a == b) ? 0 : (a == false) ? -1 : 1);
-    }
-
-    /** 
-     * A Comparator optimized for BooleanWritable. 
-     */ 
-    public static class Comparator extends WritableComparator {
-        public Comparator() {
-            super(BooleanWritable.class);
-        }
-
-        public int compare(byte[] b1, int s1, int l1,
-                           byte[] b2, int s2, int l2) {
-            boolean a = (readInt(b1, s1) == 1) ? true : false;
-            boolean b = (readInt(b2, s2) == 1) ? true : false;
-            return ((a == b) ? 0 : (a == false) ? -1 : 1);
-        }
-    }
-
-
-    static {
-      WritableComparator.define(BooleanWritable.class, new Comparator());
-    }
+  static {
+    WritableComparator.define(BooleanWritable.class, new Comparator());
+  }
 }
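
BooleanWritable is re-indented wholesale: the value accessors, the Writable serialization methods, the raw-byte Comparator, and the static WritableComparator.define registration all keep their old behaviour. A round-trip sketch using only the methods visible in this hunk:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.io.BooleanWritable;

public class BooleanWritableSketch {
  public static void main(String[] args) throws Exception {
    BooleanWritable original = new BooleanWritable(true);

    // Serialize with write(DataOutput), exactly as shown in the hunk.
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    original.write(new DataOutputStream(bytes));

    // Deserialize with readFields(DataInput) and compare.
    BooleanWritable copy = new BooleanWritable();
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println("equal: " + original.equals(copy));        // true
    System.out.println("compareTo: " + original.compareTo(copy)); // 0
  }
}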

+ 1 - 1
src/java/org/apache/hadoop/io/BytesWritable.java

@@ -194,7 +194,7 @@ public class BytesWritable implements WritableComparable {
      * Compare the buffers in serialized form.
      */
     public int compare(byte[] b1, int s1, int l1,
-        byte[] b2, int s2, int l2) {
+                       byte[] b2, int s2, int l2) {
       int size1 = readInt(b1, s1);
       int size2 = readInt(b2, s2);
       return compareBytes(b1,s1+4, size1, b2, s2+4, size2);

+ 181 - 181
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -82,7 +82,7 @@ public class SequenceFile {
   static public CompressionType getCompressionType(Configuration job) {
     String name = job.get("io.seqfile.compression.type");
     return name == null ? CompressionType.RECORD : 
-                          CompressionType.valueOf(name);
+      CompressionType.valueOf(name);
   }
   
   /**
@@ -106,9 +106,9 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer 
-  createWriter(FileSystem fs, Configuration conf, Path name, 
-      Class keyClass, Class valClass) 
-  throws IOException {
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass) 
+    throws IOException {
     return createWriter(fs,conf,name,keyClass,valClass,
                         getCompressionType(conf));
   }
@@ -125,19 +125,19 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer 
-  createWriter(FileSystem fs, Configuration conf, Path name, 
-      Class keyClass, Class valClass, CompressionType compressionType) 
-  throws IOException {
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, CompressionType compressionType) 
+    throws IOException {
     Writer writer = null;
     
     if (compressionType == CompressionType.NONE) {
       writer = new Writer(fs, conf, name, keyClass, valClass, null, new Metadata());
     } else if (compressionType == CompressionType.RECORD) {
       writer = new RecordCompressWriter(fs, conf, name, keyClass, valClass, 
-          new DefaultCodec());
+                                        new DefaultCodec());
     } else if (compressionType == CompressionType.BLOCK){
       writer = new BlockCompressWriter(fs, conf, name, keyClass, valClass, 
-          new DefaultCodec());
+                                       new DefaultCodec());
     }
     
     return writer;
@@ -156,19 +156,19 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer
-  createWriter(FileSystem fs, Configuration conf, Path name, 
-      Class keyClass, Class valClass, CompressionType compressionType,
-      Progressable progress) throws IOException {
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, CompressionType compressionType,
+                 Progressable progress) throws IOException {
     Writer writer = null;
     
     if (compressionType == CompressionType.NONE) {
       writer = new Writer(fs, conf, name, keyClass, valClass, progress, new Metadata()); 
     } else if (compressionType == CompressionType.RECORD) {
       writer = new RecordCompressWriter(fs, conf, name, 
-          keyClass, valClass, new DefaultCodec(), progress, new Metadata());
+                                        keyClass, valClass, new DefaultCodec(), progress, new Metadata());
     } else if (compressionType == CompressionType.BLOCK){
       writer = new BlockCompressWriter(fs, conf, name, 
-          keyClass, valClass, new DefaultCodec(), progress, new Metadata());
+                                       keyClass, valClass, new DefaultCodec(), progress, new Metadata());
     }
     
     return writer;
@@ -187,15 +187,15 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer 
-  createWriter(FileSystem fs, Configuration conf, Path name, 
-      Class keyClass, Class valClass, 
-      CompressionType compressionType, CompressionCodec codec) 
-  throws IOException {
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, 
+                 CompressionType compressionType, CompressionCodec codec) 
+    throws IOException {
     if ((codec instanceof GzipCodec) && 
         !NativeCodeLoader.isNativeCodeLoaded() && 
         !ZlibFactory.isNativeZlibLoaded()) {
       throw new IllegalArgumentException("SequenceFile doesn't work with " +
-          "GzipCodec without native-hadoop code!");
+                                         "GzipCodec without native-hadoop code!");
     }
     
     Writer writer = null;
@@ -204,10 +204,10 @@ public class SequenceFile {
       writer = new Writer(fs, conf, name, keyClass, valClass); 
     } else if (compressionType == CompressionType.RECORD) {
       writer = new RecordCompressWriter(fs, conf, name, keyClass, valClass, 
-          codec);
+                                        codec);
     } else if (compressionType == CompressionType.BLOCK){
       writer = new BlockCompressWriter(fs, conf, name, keyClass, valClass, 
-          codec);
+                                       codec);
     }
     
     return writer;
@@ -228,15 +228,15 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer
-  createWriter(FileSystem fs, Configuration conf, Path name, 
-      Class keyClass, Class valClass, 
-      CompressionType compressionType, CompressionCodec codec,
-      Progressable progress, Metadata metadata) throws IOException {
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, 
+                 CompressionType compressionType, CompressionCodec codec,
+                 Progressable progress, Metadata metadata) throws IOException {
     if ((codec instanceof GzipCodec) && 
         !NativeCodeLoader.isNativeCodeLoaded() && 
         !ZlibFactory.isNativeZlibLoaded()) {
       throw new IllegalArgumentException("SequenceFile doesn't work with " +
-          "GzipCodec without native-hadoop code!");
+                                         "GzipCodec without native-hadoop code!");
     }
     
     Writer writer = null;
@@ -245,10 +245,10 @@ public class SequenceFile {
       writer = new Writer(fs, conf, name, keyClass, valClass, progress, metadata);
     } else if (compressionType == CompressionType.RECORD) {
       writer = new RecordCompressWriter(fs, conf, name, 
-          keyClass, valClass, codec, progress, metadata);
+                                        keyClass, valClass, codec, progress, metadata);
     } else if (compressionType == CompressionType.BLOCK){
       writer = new BlockCompressWriter(fs, conf, name, 
-          keyClass, valClass, codec, progress, metadata);
+                                       keyClass, valClass, codec, progress, metadata);
     }
     
     return writer;
@@ -268,12 +268,12 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer
-  createWriter(FileSystem fs, Configuration conf, Path name, 
-      Class keyClass, Class valClass, 
-      CompressionType compressionType, CompressionCodec codec,
-      Progressable progress) throws IOException {
+    createWriter(FileSystem fs, Configuration conf, Path name, 
+                 Class keyClass, Class valClass, 
+                 CompressionType compressionType, CompressionCodec codec,
+                 Progressable progress) throws IOException {
     Writer writer = createWriter(fs, conf, name, keyClass, valClass, 
-        compressionType, codec, progress, new Metadata());
+                                 compressionType, codec, progress, new Metadata());
     return writer;
   }
 
@@ -289,15 +289,15 @@ public class SequenceFile {
    * @throws IOException
    */
   private static Writer
-  createWriter(Configuration conf, FSDataOutputStream out, 
-      Class keyClass, Class valClass, boolean compress, boolean blockCompress,
-      CompressionCodec codec, Metadata metadata)
-  throws IOException {
+    createWriter(Configuration conf, FSDataOutputStream out, 
+                 Class keyClass, Class valClass, boolean compress, boolean blockCompress,
+                 CompressionCodec codec, Metadata metadata)
+    throws IOException {
     if ((codec instanceof GzipCodec) && 
         !NativeCodeLoader.isNativeCodeLoaded() && 
         !ZlibFactory.isNativeZlibLoaded()) {
       throw new IllegalArgumentException("SequenceFile doesn't work with " +
-          "GzipCodec without native-hadoop code!");
+                                         "GzipCodec without native-hadoop code!");
     }
 
     Writer writer = null;
@@ -324,12 +324,12 @@ public class SequenceFile {
    * @throws IOException
    */
   private static Writer
-  createWriter(Configuration conf, FSDataOutputStream out, 
-      Class keyClass, Class valClass, boolean compress, boolean blockCompress,
-      CompressionCodec codec)
-  throws IOException {
+    createWriter(Configuration conf, FSDataOutputStream out, 
+                 Class keyClass, Class valClass, boolean compress, boolean blockCompress,
+                 CompressionCodec codec)
+    throws IOException {
     Writer writer = createWriter(conf, out, keyClass, valClass, compress, 
-        blockCompress, codec, new Metadata());
+                                 blockCompress, codec, new Metadata());
     return writer;
   }
 
@@ -347,15 +347,15 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer
-  createWriter(Configuration conf, FSDataOutputStream out, 
-      Class keyClass, Class valClass, CompressionType compressionType,
-      CompressionCodec codec, Metadata metadata)
-  throws IOException {
+    createWriter(Configuration conf, FSDataOutputStream out, 
+                 Class keyClass, Class valClass, CompressionType compressionType,
+                 CompressionCodec codec, Metadata metadata)
+    throws IOException {
     if ((codec instanceof GzipCodec) && 
         !NativeCodeLoader.isNativeCodeLoaded() && 
         !ZlibFactory.isNativeZlibLoaded()) {
       throw new IllegalArgumentException("SequenceFile doesn't work with " +
-          "GzipCodec without native-hadoop code!");
+                                         "GzipCodec without native-hadoop code!");
     }
 
     Writer writer = null;
@@ -383,12 +383,12 @@ public class SequenceFile {
    * @throws IOException
    */
   public static Writer
-  createWriter(Configuration conf, FSDataOutputStream out, 
-      Class keyClass, Class valClass, CompressionType compressionType,
-      CompressionCodec codec)
-  throws IOException {
+    createWriter(Configuration conf, FSDataOutputStream out, 
+                 Class keyClass, Class valClass, CompressionType compressionType,
+                 CompressionCodec codec)
+    throws IOException {
     Writer writer = createWriter(conf, out, keyClass, valClass, compressionType,
-        codec, new Metadata());
+                                 codec, new Metadata());
     return writer;
   }
   
@@ -401,14 +401,14 @@ public class SequenceFile {
      * @throws IOException
      */
     public void writeUncompressedBytes(DataOutputStream outStream)
-    throws IOException;
+      throws IOException;
 
     /** Write compressed bytes to outStream. 
      * Note: that it will NOT compress the bytes if they are not compressed.
      * @param outStream : Stream to write compressed bytes into.
      */
     public void writeCompressedBytes(DataOutputStream outStream) 
-    throws IllegalArgumentException, IOException;
+      throws IllegalArgumentException, IOException;
   }
   
   private static class UncompressedBytes implements ValueBytes {
@@ -433,14 +433,14 @@ public class SequenceFile {
     }
     
     public void writeUncompressedBytes(DataOutputStream outStream)
-    throws IOException {
+      throws IOException {
       outStream.write(data, 0, dataSize);
     }
 
     public void writeCompressedBytes(DataOutputStream outStream) 
-    throws IllegalArgumentException, IOException {
+      throws IllegalArgumentException, IOException {
       throw 
-      new IllegalArgumentException("UncompressedBytes cannot be compressed!");
+        new IllegalArgumentException("UncompressedBytes cannot be compressed!");
     }
 
   } // UncompressedBytes
@@ -471,7 +471,7 @@ public class SequenceFile {
     }
     
     public void writeUncompressedBytes(DataOutputStream outStream)
-    throws IOException {
+      throws IOException {
       if (decompressedStream == null) {
         rawData = new DataInputBuffer();
         decompressedStream = codec.createInputStream(rawData);
@@ -488,7 +488,7 @@ public class SequenceFile {
     }
 
     public void writeCompressedBytes(DataOutputStream outStream) 
-    throws IllegalArgumentException, IOException {
+      throws IllegalArgumentException, IOException {
       outStream.write(data, 0, dataSize);
     }
 
@@ -562,10 +562,10 @@ public class SequenceFile {
         Map.Entry<Text, Text> en1 = (Map.Entry<Text, Text>)iter1.next();
         Map.Entry<Text, Text> en2 = (Map.Entry<Text, Text>)iter2.next();
         if (!en1.getKey().equals(en2.getKey())) {
-           return false;
+          return false;
         }
         if (!en1.getValue().equals(en2.getValue())) {
-           return false;
+          return false;
         }
       }
       if (iter1.hasNext() || iter2.hasNext()) {
@@ -624,14 +624,14 @@ public class SequenceFile {
     
     /** Create the named file. */
     public Writer(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass)
+                  Class keyClass, Class valClass)
       throws IOException {
       this(fs, conf, name, keyClass, valClass, null, new Metadata());
     }
     
     /** Create the named file with write-progress reporter. */
     public Writer(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, Progressable progress, Metadata metadata)
+                  Class keyClass, Class valClass, Progressable progress, Metadata metadata)
       throws IOException {
       init(name, conf, fs.create(name, progress), keyClass, valClass, false, null, metadata);
       initializeFileHeader();
@@ -641,8 +641,8 @@ public class SequenceFile {
     
     /** Write to an arbitrary stream using a specified buffer size. */
     private Writer(Configuration conf, FSDataOutputStream out, 
-        Class keyClass, Class valClass, Metadata metadata)
-    throws IOException {
+                   Class keyClass, Class valClass, Metadata metadata)
+      throws IOException {
       init(null, conf, out, keyClass, valClass, false, null, metadata);
       
       initializeFileHeader();
@@ -652,13 +652,13 @@ public class SequenceFile {
 
     /** Write the initial part of file header. */
     void initializeFileHeader() 
-    throws IOException{
+      throws IOException{
       out.write(VERSION);
     }
 
     /** Write the final part of file header. */
     void finalizeFileHeader() 
-    throws IOException{
+      throws IOException{
       out.write(sync);                       // write the sync bytes
       out.flush();                           // flush header
     }
@@ -668,7 +668,7 @@ public class SequenceFile {
     
     /** Write and flush the file header. */
     void writeFileHeader() 
-    throws IOException {
+      throws IOException {
       Text.writeString(out, keyClass.getName());
       Text.writeString(out, valClass.getName());
       
@@ -683,9 +683,9 @@ public class SequenceFile {
     
     /** Initialize. */
     void init(Path name, Configuration conf, FSDataOutputStream out,
-                      Class keyClass, Class valClass,
-                      boolean compress, CompressionCodec codec, Metadata metadata) 
-    throws IOException {
+              Class keyClass, Class valClass,
+              boolean compress, CompressionCodec codec, Metadata metadata) 
+      throws IOException {
       this.target = name;
       this.conf = conf;
       this.out = out;
@@ -741,10 +741,10 @@ public class SequenceFile {
       throws IOException {
       if (key.getClass() != keyClass)
         throw new IOException("wrong key class: "+key.getClass().getName()
-            +" is not "+keyClass);
+                              +" is not "+keyClass);
       if (val.getClass() != valClass)
         throw new IOException("wrong value class: "+val.getClass().getName()
-            +" is not "+valClass);
+                              +" is not "+valClass);
 
       buffer.reset();
 
@@ -772,8 +772,8 @@ public class SequenceFile {
     }
 
     public synchronized void appendRaw(
-        byte[] keyData, int keyOffset, int keyLength, ValueBytes val) 
-    throws IOException {
+                                       byte[] keyData, int keyOffset, int keyLength, ValueBytes val) 
+      throws IOException {
       if (keyLength == 0)
         throw new IOException("zero length keys not allowed: " + keyLength);
 
@@ -808,8 +808,8 @@ public class SequenceFile {
     
     /** Create the named file. */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, CompressionCodec codec) 
-    throws IOException {
+                                Class keyClass, Class valClass, CompressionCodec codec) 
+      throws IOException {
       super.init(name, conf, fs.create(name), keyClass, valClass, true, codec, new Metadata());
       
       initializeFileHeader();
@@ -819,11 +819,11 @@ public class SequenceFile {
     
     /** Create the named file with write-progress reporter. */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, CompressionCodec codec,
-        Progressable progress, Metadata metadata)
-    throws IOException {
+                                Class keyClass, Class valClass, CompressionCodec codec,
+                                Progressable progress, Metadata metadata)
+      throws IOException {
       super.init(name, conf, fs.create(name, progress), 
-          keyClass, valClass, true, codec, metadata);
+                 keyClass, valClass, true, codec, metadata);
       
       initializeFileHeader();
       writeFileHeader();
@@ -832,15 +832,15 @@ public class SequenceFile {
     
     /** Create the named file with write-progress reporter. */
     public RecordCompressWriter(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, CompressionCodec codec,
-        Progressable progress)
-    throws IOException {
+                                Class keyClass, Class valClass, CompressionCodec codec,
+                                Progressable progress)
+      throws IOException {
       this(fs, conf, name, keyClass, valClass, codec, progress, new Metadata());
     }
     
     /** Write to an arbitrary stream using a specified buffer size. */
     private RecordCompressWriter(Configuration conf, FSDataOutputStream out,
-                   Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
+                                 Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
       throws IOException {
       super.init(null, conf, out, keyClass, valClass, true, codec, metadata);
       
@@ -858,10 +858,10 @@ public class SequenceFile {
       throws IOException {
       if (key.getClass() != keyClass)
         throw new IOException("wrong key class: "+key.getClass().getName()
-            +" is not "+keyClass);
+                              +" is not "+keyClass);
       if (val.getClass() != valClass)
         throw new IOException("wrong value class: "+val.getClass().getName()
-            +" is not "+valClass);
+                              +" is not "+valClass);
 
       buffer.reset();
 
@@ -886,9 +886,9 @@ public class SequenceFile {
 
     /** Append a key/value pair. */
     public synchronized void appendRaw(
-        byte[] keyData, int keyOffset, int keyLength,
-        ValueBytes val
-        ) throws IOException {
+                                       byte[] keyData, int keyOffset, int keyLength,
+                                       ValueBytes val
+                                       ) throws IOException {
 
       if (keyLength == 0)
         throw new IOException("zero length keys not allowed");
@@ -924,8 +924,8 @@ public class SequenceFile {
     
     /** Create the named file. */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, CompressionCodec codec) 
-    throws IOException {
+                               Class keyClass, Class valClass, CompressionCodec codec) 
+      throws IOException {
       super.init(name, conf, fs.create(name), keyClass, valClass, true, codec, new Metadata());
       init(conf.getInt("io.seqfile.compress.blocksize", 1000000));
       
@@ -936,11 +936,11 @@ public class SequenceFile {
     
     /** Create the named file with write-progress reporter. */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, CompressionCodec codec,
-        Progressable progress, Metadata metadata)
-    throws IOException {
+                               Class keyClass, Class valClass, CompressionCodec codec,
+                               Progressable progress, Metadata metadata)
+      throws IOException {
       super.init(name, conf, fs.create(name, progress), keyClass, valClass, 
-          true, codec, metadata);
+                 true, codec, metadata);
       init(conf.getInt("io.seqfile.compress.blocksize", 1000000));
       
       initializeFileHeader();
@@ -950,15 +950,15 @@ public class SequenceFile {
     
     /** Create the named file with write-progress reporter. */
     public BlockCompressWriter(FileSystem fs, Configuration conf, Path name, 
-        Class keyClass, Class valClass, CompressionCodec codec,
-        Progressable progress)
-    throws IOException {
+                               Class keyClass, Class valClass, CompressionCodec codec,
+                               Progressable progress)
+      throws IOException {
       this(fs, conf, name, keyClass, valClass, codec, progress, new Metadata());
     }
     
     /** Write to an arbitrary stream using a specified buffer size. */
     private BlockCompressWriter(Configuration conf, FSDataOutputStream out,
-                   Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
+                                Class keyClass, Class valClass, CompressionCodec codec, Metadata metadata)
       throws IOException {
       super.init(null, conf, out, keyClass, valClass, true, codec, metadata);
       init(1000000);
@@ -978,12 +978,12 @@ public class SequenceFile {
     
     /** Workhorse to check and write out compressed data/lengths */
     private synchronized 
-    void writeBuffer(DataOutputBuffer uncompressedDataBuffer) 
-    throws IOException {
+      void writeBuffer(DataOutputBuffer uncompressedDataBuffer) 
+      throws IOException {
       deflateFilter.resetState();
       buffer.reset();
       deflateOut.write(uncompressedDataBuffer.getData(), 0, 
-          uncompressedDataBuffer.getLength());
+                       uncompressedDataBuffer.getLength());
       deflateOut.flush();
       deflateFilter.finish();
       
@@ -1070,9 +1070,9 @@ public class SequenceFile {
     
     /** Append a key/value pair. */
     public synchronized void appendRaw(
-        byte[] keyData, int keyOffset, int keyLength,
-        ValueBytes val
-        ) throws IOException {
+                                       byte[] keyData, int keyOffset, int keyLength,
+                                       ValueBytes val
+                                       ) throws IOException {
       
       if (keyLength == 0)
         throw new IOException("zero length keys not allowed");
@@ -1218,7 +1218,7 @@ public class SequenceFile {
           try {
             Class codecClass = conf.getClassByName(codecClassname);
             this.codec = (CompressionCodec)
-                 ReflectionUtils.newInstance(codecClass, conf);
+              ReflectionUtils.newInstance(codecClass, conf);
           } catch (ClassNotFoundException cnfe) {
             throw new IllegalArgumentException("Unknown codec: " + 
                                                codecClassname, cnfe);
@@ -1293,7 +1293,7 @@ public class SequenceFile {
     
     /** Read a compressed buffer */
     private synchronized void readBuffer(DataInputBuffer buffer, 
-        CompressionInputStream filter) throws IOException {
+                                         CompressionInputStream filter) throws IOException {
       // Read data into a temporary buffer
       DataOutputBuffer dataBuffer = new DataOutputBuffer();
       int dataBufferLength = WritableUtils.readVInt(in);
@@ -1378,7 +1378,7 @@ public class SequenceFile {
         if (skipValBytes > 0) {
           if (valIn.skipBytes(skipValBytes) != skipValBytes) {
             throw new IOException("Failed to seek to " + currentKey + 
-                "(th) value!");
+                                  "(th) value!");
           }
         }
       }
@@ -1390,7 +1390,7 @@ public class SequenceFile {
      * @throws IOException
      */
     public synchronized void getCurrentValue(Writable val) 
-    throws IOException {
+      throws IOException {
       if (val instanceof Configurable) {
         ((Configurable) val).setConf(this.conf);
       }
@@ -1404,8 +1404,8 @@ public class SequenceFile {
         if (valIn.read() > 0) {
           LOG.info("available bytes: " + valIn.available());
           throw new IOException(val+" read "+(valBuffer.getPosition()-keyLength)
-              + " bytes, should read " +
-              (valBuffer.getLength()-keyLength));
+                                + " bytes, should read " +
+                                (valBuffer.getLength()-keyLength));
         }
       } else {
         // Get the value
@@ -1428,7 +1428,7 @@ public class SequenceFile {
     public synchronized boolean next(Writable key) throws IOException {
       if (key.getClass() != keyClass)
         throw new IOException("wrong key class: "+key.getClass().getName()
-            +" is not "+keyClass);
+                              +" is not "+keyClass);
 
       if (!blockCompressed) {
         outBuf.reset();
@@ -1443,7 +1443,7 @@ public class SequenceFile {
         valBuffer.mark(0);
         if (valBuffer.getPosition() != keyLength)
           throw new IOException(key + " read " + valBuffer.getPosition()
-              + " bytes, should read " + keyLength);
+                                + " bytes, should read " + keyLength);
       } else {
         //Reset syncSeen
         syncSeen = false;
@@ -1489,7 +1489,7 @@ public class SequenceFile {
     }
     
     private synchronized int checkAndReadSync(int length) 
-    throws IOException {
+      throws IOException {
       if (version > 1 && sync != null &&
           length == SYNC_ESCAPE) {              // process a sync entry
         //LOG.info("sync@"+in.getPos());
@@ -1514,7 +1514,7 @@ public class SequenceFile {
       // Unsupported for block-compressed sequence files
       if (blockCompressed) {
         throw new IOException("Unsupported call for block-compressed" +
-            " SequenceFiles - use SequenceFile.Reader.next(DataOutputStream, ValueBytes)");
+                              " SequenceFiles - use SequenceFile.Reader.next(DataOutputStream, ValueBytes)");
       }
       if (in.getPos() >= end)
         return -1;
@@ -1548,7 +1548,7 @@ public class SequenceFile {
      * @throws IOException
      */
     public int nextRaw(DataOutputBuffer key, ValueBytes val) 
-    throws IOException {
+      throws IOException {
       if (!blockCompressed) {
         if (in.getPos() >= end) 
           return -1;
@@ -1607,7 +1607,7 @@ public class SequenceFile {
      * @throws IOException
      */
     public int nextRawKey(DataOutputBuffer key) 
-    throws IOException {
+      throws IOException {
       if (!blockCompressed) {
         if (in.getPos() >= end) 
           return -1;
@@ -1650,7 +1650,7 @@ public class SequenceFile {
      * @throws IOException
      */
     public int nextRawValue(ValueBytes val) 
-    throws IOException {
+      throws IOException {
       
       // Position stream to current value
       seekToCurrentValue();
@@ -1776,7 +1776,7 @@ public class SequenceFile {
 
     /** Sort and merge using an arbitrary {@link WritableComparator}. */
     public Sorter(FileSystem fs, WritableComparator comparator, Class valClass, 
-        Configuration conf) {
+                  Configuration conf) {
       this.fs = fs;
       this.comparator = comparator;
       this.keyClass = comparator.getKeyClass();
@@ -1827,7 +1827,7 @@ public class SequenceFile {
      * @return iterator the RawKeyValueIterator
      */
     public RawKeyValueIterator sortAndIterate(Path[] inFiles, Path tempDir, 
-                                    boolean deleteInput) throws IOException {
+                                              boolean deleteInput) throws IOException {
       Path outFile = new Path(tempDir + Path.SEPARATOR + "all.2");
       if (fs.exists(outFile)) {
         throw new IOException("already exists: " + outFile);
@@ -1915,15 +1915,15 @@ public class SequenceFile {
           int bytesProcessed = 0;
           rawKeys.reset();
           while (!atEof && 
-              bytesProcessed < memoryLimit && count < recordLimit) {
+                 bytesProcessed < memoryLimit && count < recordLimit) {
 
             // Read a record into buffer
             // Note: Attempt to re-use 'rawValue' as far as possible
             int keyOffset = rawKeys.getLength();       
             ValueBytes rawValue = 
               (count == keyOffsets.length || rawValues[count] == null) ? 
-                  in.createValueBytes() : 
-                  rawValues[count];
+              in.createValueBytes() : 
+              rawValues[count];
             int recordLength = in.nextRaw(rawKeys, rawValue);
             if (recordLength == -1) {
               in.close();
@@ -1959,7 +1959,7 @@ public class SequenceFile {
           rawBuffer = rawKeys.getData();
           sort(count);
           flush(count, bytesProcessed, isCompressed, isBlockCompressed, codec, 
-              segments==0 && atEof);
+                segments==0 && atEof);
           segments++;
         }
         return segments;
@@ -2002,8 +2002,8 @@ public class SequenceFile {
       }
 
       private void flush(int count, int bytesProcessed, boolean isCompressed, 
-          boolean isBlockCompressed, CompressionCodec codec, boolean done) 
-      throws IOException {
+                         boolean isBlockCompressed, CompressionCodec codec, boolean done) 
+        throws IOException {
         if (out == null) {
           outName = done ? outFile : outFile.suffix(".0");
           out = fs.create(outName);
@@ -2014,7 +2014,7 @@ public class SequenceFile {
 
         long segmentStart = out.getPos();
         Writer writer = createWriter(conf, out, keyClass, valClass, 
-            isCompressed, isBlockCompressed, codec);
+                                     isCompressed, isBlockCompressed, codec);
         
         if (!done) {
           writer.sync = null;                     // disable sync on temp files
@@ -2076,16 +2076,16 @@ public class SequenceFile {
       Progress getProgress();
     }    
     
-  /**
-   * Merges the list of segments of type <code>SegmentDescriptor</code>
-   * @param segments the list of SegmentDescriptors
+    /**
+     * Merges the list of segments of type <code>SegmentDescriptor</code>
+     * @param segments the list of SegmentDescriptors
      * @param tmpDir the directory to write temporary files into
-   * @return RawKeyValueIterator
-   * @throws IOException
-   */
+     * @return RawKeyValueIterator
+     * @throws IOException
+     */
     public RawKeyValueIterator merge(List <SegmentDescriptor> segments, 
                                      Path tmpDir) 
-    throws IOException {
+      throws IOException {
       MergeQueue mQueue = new MergeQueue(segments, tmpDir);
       return mQueue.merge();
     }
@@ -2102,7 +2102,7 @@ public class SequenceFile {
      */
     public RawKeyValueIterator merge(Path [] inNames, boolean deleteInputs,
                                      Path tmpDir) 
-    throws IOException {
+      throws IOException {
       return merge(inNames, deleteInputs, 
                    (inNames.length < factor) ? inNames.length : factor,
                    tmpDir);
@@ -2120,12 +2120,12 @@ public class SequenceFile {
      */
     public RawKeyValueIterator merge(Path [] inNames, boolean deleteInputs,
                                      int factor, Path tmpDir) 
-    throws IOException {
+      throws IOException {
       //get the segments from inNames
       ArrayList <SegmentDescriptor> a = new ArrayList <SegmentDescriptor>();
       for (int i = 0; i < inNames.length; i++) {
         SegmentDescriptor s = new SegmentDescriptor(0, 
-                              fs.getLength(inNames[i]), inNames[i]);
+                                                    fs.getLength(inNames[i]), inNames[i]);
         s.preserveInput(!deleteInputs);
         s.doSync();
         a.add(s);
@@ -2146,7 +2146,7 @@ public class SequenceFile {
      */
     public RawKeyValueIterator merge(Path [] inNames, Path tempDir, 
                                      boolean deleteInputs) 
-    throws IOException {
+      throws IOException {
       //outFile will basically be used as prefix for temp files for the
       //intermediate merge outputs           
       this.outFile = new Path(tempDir + Path.SEPARATOR + "merged");
@@ -2154,7 +2154,7 @@ public class SequenceFile {
       ArrayList <SegmentDescriptor> a = new ArrayList <SegmentDescriptor>();
       for (int i = 0; i < inNames.length; i++) {
         SegmentDescriptor s = new SegmentDescriptor(0, 
-                              fs.getLength(inNames[i]), inNames[i]);
+                                                    fs.getLength(inNames[i]), inNames[i]);
         s.preserveInput(!deleteInputs);
         s.doSync();
         a.add(s);
@@ -2177,8 +2177,8 @@ public class SequenceFile {
      * @deprecated call  #cloneFileAttributes(Path,Path,Progressable) instead
      */
     public Writer cloneFileAttributes(FileSystem ignoredFileSys,
-                  Path inputFile, Path outputFile, Progressable prog) 
-    throws IOException {
+                                      Path inputFile, Path outputFile, Progressable prog) 
+      throws IOException {
       return cloneFileAttributes(inputFile, outputFile, prog);
     }
 
@@ -2193,7 +2193,7 @@ public class SequenceFile {
      * @throws IOException
      */
     public Writer cloneFileAttributes(Path inputFile, Path outputFile, 
-                  Progressable prog) throws IOException {
+                                      Progressable prog) throws IOException {
       FileSystem srcFileSys = inputFile.getFileSystem(conf);
       Reader reader = new Reader(srcFileSys, inputFile, 4096, conf);
       boolean compress = reader.isCompressed();
@@ -2205,12 +2205,12 @@ public class SequenceFile {
       FSDataOutputStream out;
       if (prog != null)
         out = dstFileSys.create(outputFile, true, 
-            conf.getInt("io.file.buffer.size", 4096), prog);
+                                conf.getInt("io.file.buffer.size", 4096), prog);
       else
         out = dstFileSys.create(outputFile, true, 
-            conf.getInt("io.file.buffer.size", 4096));
+                                conf.getInt("io.file.buffer.size", 4096));
       Writer writer = createWriter(conf, out, keyClass, valClass, compress, 
-                          blockCompress, codec);
+                                   blockCompress, codec);
       return writer;
     }
 
@@ -2222,7 +2222,7 @@ public class SequenceFile {
      * @throws IOException
      */
     public void writeFile(RawKeyValueIterator records, Writer writer) 
-    throws IOException {
+      throws IOException {
       while(records.next()) {
         writer.appendRaw(records.getKey().getData(), 0, 
                          records.getKey().getLength(), records.getValue());
@@ -2251,7 +2251,7 @@ public class SequenceFile {
     private int mergePass(Path tmpDir) throws IOException {
       LOG.debug("running merge pass");
       Writer writer = cloneFileAttributes(
-              outFile.suffix(".0"), outFile, null);
+                                          outFile.suffix(".0"), outFile, null);
       RawKeyValueIterator r = merge(outFile.suffix(".0"), 
                                     outFile.suffix(".0.index"), tmpDir);
       writeFile(r, writer);
@@ -2268,7 +2268,7 @@ public class SequenceFile {
      * @throws IOException
      */
     private RawKeyValueIterator merge(Path inName, Path indexIn, Path tmpDir) 
-    throws IOException {
+      throws IOException {
       //get the segments from indexIn
       //we create a SegmentContainer so that we can track segments belonging to
       //inName and delete inName as soon as we see that we have looked at all
@@ -2281,7 +2281,7 @@ public class SequenceFile {
     
     /** This class implements the core of the merge logic */
     private class MergeQueue extends PriorityQueue 
-    implements RawKeyValueIterator {
+      implements RawKeyValueIterator {
       private boolean compress;
       private boolean blockCompress;
       private DataOutputBuffer rawKey = new DataOutputBuffer();
@@ -2300,7 +2300,7 @@ public class SequenceFile {
           compress = stream.in.isCompressed();
           blockCompress = stream.in.isBlockCompressed();
         } else if (compress != stream.in.isCompressed() || 
-            blockCompress != stream.in.isBlockCompressed()) {
+                   blockCompress != stream.in.isBlockCompressed()) {
           throw new IOException("All merged files must be compressed or not.");
         } 
         super.put(stream);
@@ -2323,8 +2323,8 @@ public class SequenceFile {
         SegmentDescriptor msa = (SegmentDescriptor)a;
         SegmentDescriptor msb = (SegmentDescriptor)b;
         return comparator.compare(msa.getKey().getData(), 0, 
-            msa.getKey().getLength(), msb.getKey().getData(), 0, 
-            msb.getKey().getLength()) < 0;
+                                  msa.getKey().getLength(), msb.getKey().getData(), 0, 
+                                  msb.getKey().getLength()) < 0;
       }
       public void close() throws IOException {
         SegmentDescriptor ms;                           // close inputs
@@ -2436,8 +2436,8 @@ public class SequenceFile {
                                                 tmpFilename.toString());
             LOG.info("writing intermediate results to " + outputFile);
             Writer writer = cloneFileAttributes(
-                      fs.makeQualified(segmentsToMerge.get(0).segmentPathName), 
-                      fs.makeQualified(outputFile), null);
+                                                fs.makeQualified(segmentsToMerge.get(0).segmentPathName), 
+                                                fs.makeQualified(outputFile), null);
             writer.sync = null; //disable sync for temp files
             writeFile(this, writer);
             writer.close();
@@ -2447,7 +2447,7 @@ public class SequenceFile {
             this.close();
             
             SegmentDescriptor tempSegment = 
-                 new SegmentDescriptor(0, fs.getLength(outputFile), outputFile);
+              new SegmentDescriptor(0, fs.getLength(outputFile), outputFile);
             //put the segment back in the TreeMap
             sortedSegmentSizes.put(tempSegment, null);
             numSegments = sortedSegmentSizes.size();
@@ -2476,7 +2476,7 @@ public class SequenceFile {
         if (numDescriptors > sortedSegmentSizes.size())
           numDescriptors = sortedSegmentSizes.size();
         SegmentDescriptor[] SegmentDescriptors = 
-                                   new SegmentDescriptor[numDescriptors];
+          new SegmentDescriptor[numDescriptors];
         Iterator iter = sortedSegmentSizes.keySet().iterator();
         int i = 0;
         while (i < numDescriptors) {
@@ -2507,7 +2507,7 @@ public class SequenceFile {
        * @param segmentPathName the path name of the file containing the segment
        */
       public SegmentDescriptor (long segmentOffset, long segmentLength, 
-              Path segmentPathName) {
+                                Path segmentPathName) {
         this.segmentOffset = segmentOffset;
         this.segmentLength = segmentLength;
         this.segmentPathName = segmentPathName;
@@ -2534,7 +2534,7 @@ public class SequenceFile {
           return (this.segmentOffset < that.segmentOffset ? -1 : 1);
         }
         return (this.segmentPathName.toString()).
-                compareTo(that.segmentPathName.toString());
+          compareTo(that.segmentPathName.toString());
       }
 
       /** Fills up the rawKey object with the key returned by the Reader
@@ -2543,25 +2543,25 @@ public class SequenceFile {
        */
       public boolean nextRawKey() throws IOException {
         if (in == null) {
-        int bufferSize = conf.getInt("io.file.buffer.size", 4096); 
-        if (fs.getUri().getScheme().startsWith("ramfs")) {
-          bufferSize = conf.getInt("io.bytes.per.checksum", 512);
-        }
-        Reader reader = new Reader(fs, segmentPathName, 
-            bufferSize, segmentOffset, 
-                segmentLength, conf);
+          int bufferSize = conf.getInt("io.file.buffer.size", 4096); 
+          if (fs.getUri().getScheme().startsWith("ramfs")) {
+            bufferSize = conf.getInt("io.bytes.per.checksum", 512);
+          }
+          Reader reader = new Reader(fs, segmentPathName, 
+                                     bufferSize, segmentOffset, 
+                                     segmentLength, conf);
         
-        //sometimes we ignore syncs especially for temp merge files
-        if (ignoreSync) reader.sync = null;
-
-        if (reader.keyClass != keyClass)
-          throw new IOException("wrong key class: " + reader.getKeyClass() +
-                                " is not " + keyClass);
-        if (reader.valClass != valClass)
-          throw new IOException("wrong value class: "+reader.getValueClass()+
-                                " is not " + valClass);
-        this.in = reader;
-        rawKey = new DataOutputBuffer();
+          //sometimes we ignore syncs especially for temp merge files
+          if (ignoreSync) reader.sync = null;
+
+          if (reader.keyClass != keyClass)
+            throw new IOException("wrong key class: " + reader.getKeyClass() +
+                                  " is not " + keyClass);
+          if (reader.valClass != valClass)
+            throw new IOException("wrong value class: "+reader.getValueClass()+
+                                  " is not " + valClass);
+          this.in = reader;
+          rawKey = new DataOutputBuffer();
         }
         rawKey.reset();
         int keyLength = 
@@ -2616,7 +2616,7 @@ public class SequenceFile {
        * @param parent the parent SegmentContainer that holds the segment
        */
       public LinkedSegmentsDescriptor (long segmentOffset, long segmentLength, 
-              Path segmentPathName, SegmentContainer parent) {
+                                       Path segmentPathName, SegmentContainer parent) {
         super(segmentOffset, segmentLength, segmentPathName);
         this.parentContainer = parent;
       }
@@ -2640,7 +2640,7 @@ public class SequenceFile {
       
       //the list of segments read from the file
       private ArrayList <SegmentDescriptor> segments = 
-                                   new ArrayList <SegmentDescriptor>();
+        new ArrayList <SegmentDescriptor>();
       /** This constructor is there primarily to serve the sort routine that 
        * generates a single output file with an associated index file */
       public SegmentContainer(Path inName, Path indexIn) throws IOException {
@@ -2652,7 +2652,7 @@ public class SequenceFile {
           long segmentLength = WritableUtils.readVLong(fsIndexIn);
           Path segmentName = inName;
           segments.add(new LinkedSegmentsDescriptor(segmentOffset, 
-                                 segmentLength, segmentName, this));
+                                                    segmentLength, segmentName, this));
         }
         fsIndexIn.close();
         fs.delete(indexIn);
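
The hunks above re-indent SequenceFile's Sorter and its MergeQueue, whose core is a k-way merge: a priority queue holds one descriptor per sorted segment, keyed by that segment's current record, and the smallest head is repeatedly emitted and advanced. A minimal, self-contained sketch of that pattern in plain Java follows; Segment and kWayMerge are illustrative names, not Hadoop APIs, and modern Java syntax is used for brevity.

import java.util.ArrayList;
import java.util.Comparator;
import java.util.Iterator;
import java.util.List;
import java.util.PriorityQueue;

public class KWayMergeSketch {
  /** One sorted run, analogous to a segment positioned at its current key. */
  static final class Segment {
    final Iterator<String> it;
    String current;
    Segment(List<String> sortedRun) { this.it = sortedRun.iterator(); advance(); }
    boolean advance() { current = it.hasNext() ? it.next() : null; return current != null; }
  }

  /** Merge several sorted runs into one sorted list, always emitting the smallest head. */
  static List<String> kWayMerge(List<List<String>> runs) {
    PriorityQueue<Segment> queue =
      new PriorityQueue<Segment>(Math.max(1, runs.size()),
                                 Comparator.comparing((Segment s) -> s.current));
    for (List<String> run : runs) {
      Segment s = new Segment(run);
      if (s.current != null) queue.add(s);            // only non-empty segments enter the queue
    }
    List<String> out = new ArrayList<>();
    while (!queue.isEmpty()) {
      Segment top = queue.poll();                     // segment with the smallest current key
      out.add(top.current);
      if (top.advance()) queue.add(top);              // re-insert while it still has keys
    }
    return out;
  }

  public static void main(String[] args) {
    List<List<String>> runs = List.of(
      List.of("apple", "melon", "zebra"),
      List.of("banana", "kiwi"),
      List.of("cherry", "lemon", "pear"));
    System.out.println(kWayMerge(runs));              // apple, banana, cherry, kiwi, ...
  }
}

When more segments exist than the merge factor allows, the code above repeats this process in passes, writing each intermediate result to a temporary file and re-inserting it as a new segment.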

+ 3 - 3
src/java/org/apache/hadoop/io/Text.java

@@ -308,8 +308,8 @@ public class Text implements WritableComparable {
   }
   
   public static String decode(byte[] utf8, int start, int length) 
-      throws CharacterCodingException {
-      return decode(ByteBuffer.wrap(utf8, start, length), true);
+    throws CharacterCodingException {
+    return decode(ByteBuffer.wrap(utf8, start, length), true);
   }
   
   /**
@@ -414,7 +414,7 @@ public class Text implements WritableComparable {
    * @throws MalformedInputException if the byte array contains invalid utf-8
    */
   public static void validateUTF8(byte[] utf8) throws MalformedInputException {
-     validateUTF8(utf8, 0, utf8.length);     
+    validateUTF8(utf8, 0, utf8.length);     
   }
   
   /**

+ 25 - 25
src/java/org/apache/hadoop/io/TwoDArrayWritable.java

@@ -36,16 +36,16 @@ public class TwoDArrayWritable implements Writable {
   }
 
   public Object toArray() {
-      int dimensions[] = {values.length, 0};
-      Object result = Array.newInstance(valueClass, dimensions);
-      for (int i = 0; i < values.length; i++) {
-          Object resultRow = Array.newInstance(valueClass, values[i].length);
-          Array.set(result, i, resultRow);
-          for (int j = 0; j < values[i].length; j++) {
-              Array.set(resultRow, j, values[i][j]);
-          }
+    int dimensions[] = {values.length, 0};
+    Object result = Array.newInstance(valueClass, dimensions);
+    for (int i = 0; i < values.length; i++) {
+      Object resultRow = Array.newInstance(valueClass, values[i].length);
+      Array.set(result, i, resultRow);
+      for (int j = 0; j < values[i].length; j++) {
+        Array.set(resultRow, j, values[i][j]);
       }
-      return result;
+    }
+    return result;
   }
 
   public void set(Writable[][] values) { this.values = values; }
@@ -56,35 +56,35 @@ public class TwoDArrayWritable implements Writable {
     // construct matrix
     values = new Writable[in.readInt()][];          
     for (int i = 0; i < values.length; i++) {
-        values[i] = new Writable[in.readInt()];
+      values[i] = new Writable[in.readInt()];
     }
 
     // construct values
     for (int i = 0; i < values.length; i++) {
-        for (int j = 0; j < values[i].length; j++) {
-            Writable value;                             // construct value
-            try {
-                value = (Writable)valueClass.newInstance();
-            } catch (InstantiationException e) {
-                throw new RuntimeException(e.toString());
-            } catch (IllegalAccessException e) {
-                throw new RuntimeException(e.toString());
-            }
-            value.readFields(in);                       // read a value
-            values[i][j] = value;                       // store it in values
+      for (int j = 0; j < values[i].length; j++) {
+        Writable value;                             // construct value
+        try {
+          value = (Writable)valueClass.newInstance();
+        } catch (InstantiationException e) {
+          throw new RuntimeException(e.toString());
+        } catch (IllegalAccessException e) {
+          throw new RuntimeException(e.toString());
         }
+        value.readFields(in);                       // read a value
+        values[i][j] = value;                       // store it in values
+      }
     }
   }
 
   public void write(DataOutput out) throws IOException {
     out.writeInt(values.length);                 // write values
     for (int i = 0; i < values.length; i++) {
-        out.writeInt(values[i].length);
+      out.writeInt(values[i].length);
     }
     for (int i = 0; i < values.length; i++) {
-        for (int j = 0; j < values[i].length; j++) {
-            values[i][j].write(out);
-        }
+      for (int j = 0; j < values[i].length; j++) {
+        values[i][j].write(out);
+      }
     }
   }
 }
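
The re-indented readFields/write above serialize a two-dimensional Writable array as: the row count, then one length per row, then every element row by row, read back in exactly the same order. A small sketch of the same on-the-wire layout for a plain int matrix, using only java.io streams; Matrix2DCodec is an illustrative name, not part of Hadoop.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.Arrays;

public class Matrix2DCodec {
  /** Write: row count, then each row's length, then all elements row by row. */
  static byte[] write(int[][] values) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.writeInt(values.length);
    for (int[] row : values) out.writeInt(row.length);
    for (int[] row : values) for (int v : row) out.writeInt(v);
    out.flush();
    return bytes.toByteArray();
  }

  /** Read back in the same order the writer used. */
  static int[][] read(byte[] data) throws IOException {
    DataInputStream in = new DataInputStream(new ByteArrayInputStream(data));
    int[][] values = new int[in.readInt()][];
    for (int i = 0; i < values.length; i++) values[i] = new int[in.readInt()];
    for (int[] row : values) for (int j = 0; j < row.length; j++) row[j] = in.readInt();
    return values;
  }

  public static void main(String[] args) throws IOException {
    int[][] m = { {1, 2, 3}, {4}, {} };
    System.out.println(Arrays.deepToString(read(write(m))));   // [[1, 2, 3], [4], []]
  }
}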

+ 2 - 2
src/java/org/apache/hadoop/io/UTF8.java

@@ -70,7 +70,7 @@ public class UTF8 implements WritableComparable {
   public void set(String string) {
     if (string.length() > 0xffff/3) {             // maybe too long
       LOG.warn("truncating long string: " + string.length()
-                  + " chars, starting with " + string.substring(0, 20));
+               + " chars, starting with " + string.substring(0, 20));
       string = string.substring(0, 0xffff/3);
     }
 
@@ -235,7 +235,7 @@ public class UTF8 implements WritableComparable {
   public static int writeString(DataOutput out, String s) throws IOException {
     if (s.length() > 0xffff/3) {         // maybe too long
       LOG.warn("truncating long string: " + s.length()
-                  + " chars, starting with " + s.substring(0, 20));
+               + " chars, starting with " + s.substring(0, 20));
       s = s.substring(0, 0xffff/3);
     }
 

+ 65 - 65
src/java/org/apache/hadoop/io/WritableUtils.java

@@ -36,14 +36,14 @@ public final class WritableUtils  {
     GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(buffer, 0, buffer.length));
     byte[] outbuf = new byte[length];
     ByteArrayOutputStream bos =  new ByteArrayOutputStream();
-     int len;
-     while((len=gzi.read(outbuf,0,outbuf.length)) != -1){
-       bos.write(outbuf,0,len);
-     }
-     byte[] decompressed =  bos.toByteArray();
-     bos.close();
-     gzi.close();
-     return decompressed;
+    int len;
+    while((len=gzi.read(outbuf,0,outbuf.length)) != -1){
+      bos.write(outbuf,0,len);
+    }
+    byte[] decompressed =  bos.toByteArray();
+    bos.close();
+    gzi.close();
+    return decompressed;
   }
 
   public static void skipCompressedByteArray(DataInput in) throws IOException {
@@ -61,7 +61,7 @@ public final class WritableUtils  {
       int len = buffer.length;
       out.writeInt(len);
       out.write(buffer,0,len);
-    /* debug only! Once we have confidence, can lose this. */
+      /* debug only! Once we have confidence, can lose this. */
       return ((bytes.length != 0) ? (100*buffer.length)/bytes.length : 0);
     } else {
       out.writeInt(-1);
@@ -212,10 +212,10 @@ public final class WritableUtils  {
    * Allocate a buffer for each thread that tries to clone objects.
    */
   private static ThreadLocal cloneBuffers = new ThreadLocal() {
-    protected synchronized Object initialValue() {
-      return new CopyInCopyOutBuffer();
-    }
-  };
+      protected synchronized Object initialValue() {
+        return new CopyInCopyOutBuffer();
+      }
+    };
   
   /**
    * Make a copy of a writable object using serialization to a buffer.
@@ -253,7 +253,7 @@ public final class WritableUtils  {
    * @throws java.io.IOException 
    */
   public static void writeVInt(DataOutput stream, int i) throws IOException {
-      writeVLong(stream, i);
+    writeVLong(stream, i);
   }
   
   /**
@@ -272,32 +272,32 @@ public final class WritableUtils  {
    * @throws java.io.IOException 
    */
   public static void writeVLong(DataOutput stream, long i) throws IOException {
-      if (i >= -112 && i <= 127) {
-          stream.writeByte((byte)i);
-          return;
-      }
+    if (i >= -112 && i <= 127) {
+      stream.writeByte((byte)i);
+      return;
+    }
       
-      int len = -112;
-      if (i < 0) {
-          i ^= -1L; // take one's complement'
-          len = -120;
-      }
+    int len = -112;
+    if (i < 0) {
+      i ^= -1L; // take one's complement'
+      len = -120;
+    }
       
-      long tmp = i;
-      while (tmp != 0) {
-          tmp = tmp >> 8;
-          len--;
-      }
+    long tmp = i;
+    while (tmp != 0) {
+      tmp = tmp >> 8;
+      len--;
+    }
       
-      stream.writeByte((byte)len);
+    stream.writeByte((byte)len);
       
-      len = (len < -120) ? -(len + 120) : -(len + 112);
+    len = (len < -120) ? -(len + 120) : -(len + 112);
       
-      for (int idx = len; idx != 0; idx--) {
-          int shiftbits = (idx - 1) * 8;
-          long mask = 0xFFL << shiftbits;
-          stream.writeByte((byte)((i & mask) >> shiftbits));
-      }
+    for (int idx = len; idx != 0; idx--) {
+      int shiftbits = (idx - 1) * 8;
+      long mask = 0xFFL << shiftbits;
+      stream.writeByte((byte)((i & mask) >> shiftbits));
+    }
   }
   
 
@@ -308,19 +308,19 @@ public final class WritableUtils  {
    * @return deserialized long from stream.
    */
   public static long readVLong(DataInput stream) throws IOException {
-      int len = stream.readByte();
-      if (len >= -112) {
-          return len;
-      }
-      boolean isNegative = (len < -120);
-      len = isNegative ? -(len + 120) : -(len + 112);
-      long i = 0;
-      for (int idx = 0; idx < len; idx++) {
-          byte b = stream.readByte();
-          i = i << 8;
-          i = i | (b & 0xFF);
-      }
-      return (isNegative ? (i ^ -1L) : i);
+    int len = stream.readByte();
+    if (len >= -112) {
+      return len;
+    }
+    boolean isNegative = (len < -120);
+    len = isNegative ? -(len + 120) : -(len + 112);
+    long i = 0;
+    for (int idx = 0; idx < len; idx++) {
+      byte b = stream.readByte();
+      i = i << 8;
+      i = i | (b & 0xFF);
+    }
+    return (isNegative ? (i ^ -1L) : i);
   }
 
   /**
@@ -330,7 +330,7 @@ public final class WritableUtils  {
    * @return deserialized integer from stream.
    */
   public static int readVInt(DataInput stream) throws IOException {
-      return (int) readVLong(stream);
+    return (int) readVLong(stream);
   }
   
 
@@ -339,25 +339,25 @@ public final class WritableUtils  {
    * @return the encoded length 
    */
   public static int getVIntSize(long i) {
-      if (i >= -112 && i <= 127) {
-          return 1;
-      }
+    if (i >= -112 && i <= 127) {
+      return 1;
+    }
       
-      int len = -112;
-      if (i < 0) {
-          i ^= -1L; // take one's complement'
-          len = -120;
-      }
+    int len = -112;
+    if (i < 0) {
+      i ^= -1L; // take one's complement'
+      len = -120;
+    }
       
-      long tmp = i;
-      while (tmp != 0) {
-          tmp = tmp >> 8;
-          len--;
-      }
+    long tmp = i;
+    while (tmp != 0) {
+      tmp = tmp >> 8;
+      len--;
+    }
       
-      len = (len < -120) ? -(len + 120) : -(len + 112);
+    len = (len < -120) ? -(len + 120) : -(len + 112);
       
-      return len+1;
+    return len+1;
   }
   /**
    * Read an Enum value from DataInput, Enums are read and written 
@@ -379,7 +379,7 @@ public final class WritableUtils  {
    * @throws IOException
    */
   public static void writeEnum(DataOutput out,  Enum enumVal) 
-  throws IOException{
+    throws IOException{
     Text.writeString(out, enumVal.name()); 
   }
 }
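
The writeVLong/readVLong hunks above implement the zero-compressed variable-length encoding: values between -112 and 127 occupy a single byte, larger magnitudes store a length-marker byte followed by the significant bytes in big-endian order, and negatives are one's-complemented first so they compress as well as positives. The standalone class below re-states that logic for a round-trip check; it mirrors the code shown rather than calling Hadoop itself.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInput;
import java.io.DataInputStream;
import java.io.DataOutput;
import java.io.DataOutputStream;
import java.io.IOException;

public class VLongSketch {
  static void writeVLong(DataOutput out, long i) throws IOException {
    if (i >= -112 && i <= 127) { out.writeByte((byte) i); return; }   // small values: one byte
    int len = -112;
    if (i < 0) { i ^= -1L; len = -120; }                               // one's complement negatives
    for (long tmp = i; tmp != 0; tmp >>= 8) len--;                     // count significant bytes
    out.writeByte((byte) len);                                         // length marker
    len = (len < -120) ? -(len + 120) : -(len + 112);
    for (int idx = len; idx != 0; idx--) {                             // big-endian payload
      int shift = (idx - 1) * 8;
      out.writeByte((byte) ((i & (0xFFL << shift)) >> shift));
    }
  }

  static long readVLong(DataInput in) throws IOException {
    int len = in.readByte();
    if (len >= -112) return len;                                       // single-byte case
    boolean negative = (len < -120);
    len = negative ? -(len + 120) : -(len + 112);
    long i = 0;
    for (int idx = 0; idx < len; idx++) i = (i << 8) | (in.readByte() & 0xFF);
    return negative ? (i ^ -1L) : i;
  }

  public static void main(String[] args) throws IOException {
    for (long v : new long[] {0, 127, 128, -112, -113, 1L << 40, Long.MIN_VALUE}) {
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      writeVLong(new DataOutputStream(bytes), v);
      long back = readVLong(new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));
      System.out.println(v + " -> " + bytes.size() + " byte(s), round-trips to " + back);
    }
  }
}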

+ 1 - 1
src/java/org/apache/hadoop/io/compress/CompressionCodec.java

@@ -34,7 +34,7 @@ public interface CompressionCodec {
    * @return a stream the user can write uncompressed data to
    */
   CompressionOutputStream createOutputStream(OutputStream out) 
-  throws IOException;
+    throws IOException;
   
   /**
    * Create a stream decompressor that will read from the given input stream.

+ 1 - 1
src/java/org/apache/hadoop/io/compress/DecompressorStream.java

@@ -95,7 +95,7 @@ class DecompressorStream extends CompressionInputStream {
   
     int n = in.read(buffer, 0, buffer.length);
     if (n == -1) {
-        throw new EOFException("Unexpected end of input stream");
+      throw new EOFException("Unexpected end of input stream");
     }
 
     decompressor.setInput(buffer, 0, n);

+ 4 - 4
src/java/org/apache/hadoop/io/compress/DefaultCodec.java

@@ -44,9 +44,9 @@ public class DefaultCodec implements Configurable, CompressionCodec {
    * @return a stream the user can write uncompressed data to
    */
   public CompressionOutputStream createOutputStream(OutputStream out) 
-  throws IOException {
+    throws IOException {
     return new CompressorStream(out, ZlibFactory.getZlibCompressor(), 
-        conf.getInt("io.file.buffer.size", 4*1024));
+                                conf.getInt("io.file.buffer.size", 4*1024));
   }
   
   /**
@@ -55,9 +55,9 @@ public class DefaultCodec implements Configurable, CompressionCodec {
    * @return a stream to read uncompressed bytes from
    */
   public CompressionInputStream createInputStream(InputStream in) 
-  throws IOException {
+    throws IOException {
     return new DecompressorStream(in, ZlibFactory.getZlibDecompressor(),
-        conf.getInt("io.file.buffer.size", 4*1024));
+                                  conf.getInt("io.file.buffer.size", 4*1024));
   }
   
   /**

+ 9 - 9
src/java/org/apache/hadoop/io/compress/GzipCodec.java

@@ -73,7 +73,7 @@ public class GzipCodec extends DefaultCodec {
     }
     
     public void write(byte[] data, int offset, int length) 
-    throws IOException {
+      throws IOException {
       out.write(data, offset, length);
     }
     
@@ -141,18 +141,18 @@ public class GzipCodec extends DefaultCodec {
    * @return a stream the user can write uncompressed data to
    */
   public CompressionOutputStream createOutputStream(OutputStream out) 
-  throws IOException {
+    throws IOException {
     CompressionOutputStream compOutStream = null;
     
     if (ZlibFactory.isNativeZlibLoaded()) {
       Compressor compressor = 
         new ZlibCompressor(ZlibCompressor.CompressionLevel.DEFAULT_COMPRESSION,
-            ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
-            ZlibCompressor.CompressionHeader.GZIP_FORMAT,
-            64*1024); 
+                           ZlibCompressor.CompressionStrategy.DEFAULT_STRATEGY,
+                           ZlibCompressor.CompressionHeader.GZIP_FORMAT,
+                           64*1024); 
      
       compOutStream = new CompressorStream(out, compressor,
-                        conf.getInt("io.file.buffer.size", 4*1024)); 
+                                           conf.getInt("io.file.buffer.size", 4*1024)); 
     } else {
       compOutStream = new GzipOutputStream(out);
     }
@@ -166,16 +166,16 @@ public class GzipCodec extends DefaultCodec {
    * @return a stream to read uncompressed bytes from
    */
   public CompressionInputStream createInputStream(InputStream in) 
-  throws IOException {
+    throws IOException {
     CompressionInputStream compInStream = null;
     
     if (ZlibFactory.isNativeZlibLoaded()) {
       Decompressor decompressor =
         new ZlibDecompressor(ZlibDecompressor.CompressionHeader.AUTODETECT_GZIP_ZLIB,
-            64*1-24);
+                             64*1-24);
 
       compInStream = new DecompressorStream(in, decompressor,
-                        conf.getInt("io.file.buffer.size", 4*1024)); 
+                                            conf.getInt("io.file.buffer.size", 4*1024)); 
     } else {
       compInStream = new GzipInputStream(in);
     }
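
GzipCodec above prefers the native zlib compressor when it has been loaded and otherwise falls back to the JDK's gzip streams. A hedged sketch of what that pure-Java fallback path amounts to, using only java.util.zip classes and a buffer size analogous to the io.file.buffer.size default shown above:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

public class GzipFallbackSketch {
  public static void main(String[] args) throws IOException {
    byte[] original = "hello, sequence file".getBytes(StandardCharsets.UTF_8);

    // Compress: the non-native path ultimately funnels bytes through a GZIP stream like this.
    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    try (GZIPOutputStream gz = new GZIPOutputStream(compressed, 4 * 1024)) {
      gz.write(original);
    }

    // Decompress and check the round trip.
    ByteArrayOutputStream restored = new ByteArrayOutputStream();
    try (GZIPInputStream gz =
           new GZIPInputStream(new ByteArrayInputStream(compressed.toByteArray()), 4 * 1024)) {
      byte[] buf = new byte[512];
      for (int n; (n = gz.read(buf)) != -1; ) restored.write(buf, 0, n);
    }
    System.out.println(new String(restored.toByteArray(), StandardCharsets.UTF_8));
  }
}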

+ 1 - 1
src/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibDeflater.java

@@ -44,7 +44,7 @@ public class BuiltInZlibDeflater extends Deflater implements Compressor {
   }
 
   public synchronized int compress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
     return super.deflate(b, off, len);
   }
 }

+ 1 - 1
src/java/org/apache/hadoop/io/compress/zlib/BuiltInZlibInflater.java

@@ -41,7 +41,7 @@ public class BuiltInZlibInflater extends Inflater implements Decompressor {
   }
 
   public synchronized int decompress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
     try {
       return super.inflate(b, off, len);
     } catch (DataFormatException dfe) {

+ 3 - 3
src/java/org/apache/hadoop/io/compress/zlib/ZlibFactory.java

@@ -39,7 +39,7 @@ public class ZlibFactory {
   static {
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       nativeZlibLoaded = ZlibCompressor.isNativeZlibLoaded() &&
-                          ZlibDecompressor.isNativeZlibLoaded();
+        ZlibDecompressor.isNativeZlibLoaded();
       
       if (nativeZlibLoaded) {
         LOG.info("Successfully loaded & initialized native-zlib library");
@@ -66,7 +66,7 @@ public class ZlibFactory {
    */
   public static Compressor getZlibCompressor() {
     return (nativeZlibLoaded) ? 
-        new ZlibCompressor() : new BuiltInZlibDeflater(); 
+      new ZlibCompressor() : new BuiltInZlibDeflater(); 
   }
 
   /**
@@ -76,7 +76,7 @@ public class ZlibFactory {
    */
   public static Decompressor getZlibDecompressor() {
     return (nativeZlibLoaded) ? 
-        new ZlibDecompressor() : new BuiltInZlibInflater(); 
+      new ZlibDecompressor() : new BuiltInZlibInflater(); 
   }
   
 }

+ 5 - 5
src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -47,7 +47,7 @@ class RetryInvocationHandler implements InvocationHandler {
   }
 
   public Object invoke(Object proxy, Method method, Object[] args)
-      throws Throwable {
+    throws Throwable {
     RetryPolicy policy = methodNameToPolicyMap.get(method.getName());
     if (policy == null) {
       policy = defaultPolicy;
@@ -60,16 +60,16 @@ class RetryInvocationHandler implements InvocationHandler {
       } catch (Exception e) {
         if (!policy.shouldRetry(e, retries++)) {
           LOG.warn("Exception while invoking " + method.getName()
-              + " of " + implementation.getClass() + ". Not retrying."
-              + StringUtils.stringifyException(e));
+                   + " of " + implementation.getClass() + ". Not retrying."
+                   + StringUtils.stringifyException(e));
           if (!method.getReturnType().equals(Void.TYPE)) {
             throw e; // non-void methods can't fail without an exception
           }
           return null;
         }
         LOG.warn("Exception while invoking " + method.getName()
-            + " of " + implementation.getClass() + ". Retrying."
-            + StringUtils.stringifyException(e));
+                 + " of " + implementation.getClass() + ". Retrying."
+                 + StringUtils.stringifyException(e));
       }
     }
   }

+ 10 - 10
src/java/org/apache/hadoop/io/retry/RetryProxy.java

@@ -38,12 +38,12 @@ public class RetryProxy {
    * @return the retry proxy
    */
   public static Object create(Class<?> iface, Object implementation,
-      RetryPolicy retryPolicy) {
+                              RetryPolicy retryPolicy) {
     return Proxy.newProxyInstance(
-        implementation.getClass().getClassLoader(),
-        new Class<?>[] { iface },
-        new RetryInvocationHandler(implementation, retryPolicy)
-    );
+                                  implementation.getClass().getClassLoader(),
+                                  new Class<?>[] { iface },
+                                  new RetryInvocationHandler(implementation, retryPolicy)
+                                  );
   }  
   
   /**
@@ -59,11 +59,11 @@ public class RetryProxy {
    * @return the retry proxy
    */
   public static Object create(Class<?> iface, Object implementation,
-      Map<String,RetryPolicy> methodNameToPolicyMap) {
+                              Map<String,RetryPolicy> methodNameToPolicyMap) {
     return Proxy.newProxyInstance(
-        implementation.getClass().getClassLoader(),
-        new Class<?>[] { iface },
-        new RetryInvocationHandler(implementation, methodNameToPolicyMap)
-    );
+                                  implementation.getClass().getClassLoader(),
+                                  new Class<?>[] { iface },
+                                  new RetryInvocationHandler(implementation, methodNameToPolicyMap)
+                                  );
   }
 }
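
RetryInvocationHandler and RetryProxy above wrap an implementation behind java.lang.reflect.Proxy and re-invoke a failed method until its policy says to stop. A minimal standalone sketch of that dynamic-proxy retry pattern follows; FlakyService, withRetries, and the fixed maxRetries count are illustrative stand-ins, not Hadoop's RetryPolicy API.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;
import java.lang.reflect.Proxy;

public class RetryProxySketch {
  interface FlakyService { String fetch(); }

  /** Re-invokes the target until it succeeds or maxRetries attempts have failed. */
  static <T> T withRetries(Class<T> iface, T target, int maxRetries) {
    InvocationHandler handler = new InvocationHandler() {
      public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
        for (int attempt = 1; ; attempt++) {
          try {
            return method.invoke(target, args);
          } catch (InvocationTargetException e) {            // unwrap the real failure
            if (attempt >= maxRetries) throw e.getCause();
            System.out.println("attempt " + attempt + " failed, retrying: " + e.getCause());
          }
        }
      }
    };
    return iface.cast(Proxy.newProxyInstance(iface.getClassLoader(),
                                             new Class<?>[] { iface }, handler));
  }

  public static void main(String[] args) {
    final int[] calls = {0};
    FlakyService real = () -> {
      if (++calls[0] < 3) throw new RuntimeException("transient error " + calls[0]);
      return "ok after " + calls[0] + " calls";
    };
    FlakyService proxied = withRetries(FlakyService.class, real, 5);
    System.out.println(proxied.fetch());
  }
}

As in the handler above, a void method could swallow the final failure and return null instead of rethrowing; the sketch simply rethrows for all methods.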

+ 98 - 98
src/java/org/apache/hadoop/ipc/SocketChannelOutputStream.java

@@ -33,115 +33,115 @@ import java.nio.channels.SocketChannel;
  */
 class SocketChannelOutputStream extends OutputStream {    
     
-    ByteBuffer buffer;
-    ByteBuffer flush;
-    SocketChannel channel;
-    Selector selector;
+  ByteBuffer buffer;
+  ByteBuffer flush;
+  SocketChannel channel;
+  Selector selector;
     
-    /* ------------------------------------------------------------------------------- */
-    /** Constructor.
-     * 
-     */
-    public SocketChannelOutputStream(SocketChannel channel)
-    {
-        this.channel = channel;
-        buffer = ByteBuffer.allocate(8); // only for small writes
-    }
+  /* ------------------------------------------------------------------------------- */
+  /** Constructor.
+   * 
+   */
+  public SocketChannelOutputStream(SocketChannel channel)
+  {
+    this.channel = channel;
+    buffer = ByteBuffer.allocate(8); // only for small writes
+  }
 
-    /* ------------------------------------------------------------------------------- */
-    /*
-     * @see java.io.OutputStream#write(int)
-     */
-    public void write(int b) throws IOException
-    {
-        buffer.clear();
-        buffer.put((byte)b);
-        buffer.flip();
-        flush = buffer;
-        flushBuffer();
-    }
+  /* ------------------------------------------------------------------------------- */
+  /*
+   * @see java.io.OutputStream#write(int)
+   */
+  public void write(int b) throws IOException
+  {
+    buffer.clear();
+    buffer.put((byte)b);
+    buffer.flip();
+    flush = buffer;
+    flushBuffer();
+  }
 
     
-    /* ------------------------------------------------------------------------------- */
-    /*
-     * @see java.io.OutputStream#close()
-     */
-    public void close() throws IOException
-    {
-        channel.close();
-    }
+  /* ------------------------------------------------------------------------------- */
+  /*
+   * @see java.io.OutputStream#close()
+   */
+  public void close() throws IOException
+  {
+    channel.close();
+  }
 
-    /* ------------------------------------------------------------------------------- */
-    /*
-     * @see java.io.OutputStream#flush()
-     */
-    public void flush() throws IOException
-    {
-    }
+  /* ------------------------------------------------------------------------------- */
+  /*
+   * @see java.io.OutputStream#flush()
+   */
+  public void flush() throws IOException
+  {
+  }
 
-    /* ------------------------------------------------------------------------------- */
-    /*
-     * @see java.io.OutputStream#write(byte[], int, int)
-     */
-    public void write(byte[] buf, int offset, int length) throws IOException
-    {
-        flush = ByteBuffer.wrap(buf,offset,length);
-        flushBuffer();
-    }
+  /* ------------------------------------------------------------------------------- */
+  /*
+   * @see java.io.OutputStream#write(byte[], int, int)
+   */
+  public void write(byte[] buf, int offset, int length) throws IOException
+  {
+    flush = ByteBuffer.wrap(buf,offset,length);
+    flushBuffer();
+  }
 
-    /* ------------------------------------------------------------------------------- */
-    /*
-     * @see java.io.OutputStream#write(byte[])
-     */
-    public void write(byte[] buf) throws IOException
-    {
-        flush = ByteBuffer.wrap(buf);
-        flushBuffer();
-    }
+  /* ------------------------------------------------------------------------------- */
+  /*
+   * @see java.io.OutputStream#write(byte[])
+   */
+  public void write(byte[] buf) throws IOException
+  {
+    flush = ByteBuffer.wrap(buf);
+    flushBuffer();
+  }
 
 
-    /* ------------------------------------------------------------------------------- */
-    private void flushBuffer() throws IOException
-    {
-        while (flush.hasRemaining())
-        {
-            int len = channel.write(flush);
+  /* ------------------------------------------------------------------------------- */
+  private void flushBuffer() throws IOException
+  {
+    while (flush.hasRemaining())
+      {
+        int len = channel.write(flush);
+        if (len < 0)
+          throw new IOException("EOF");
+        if (len == 0)
+          {
+            // write channel full.  Try letting other threads have a go.
+            Thread.yield();
+            len = channel.write(flush);
             if (len < 0)
-                throw new IOException("EOF");
+              throw new IOException("EOF");
             if (len == 0)
-            {
-                // write channel full.  Try letting other threads have a go.
-                Thread.yield();
-                len = channel.write(flush);
-                if (len < 0)
-                    throw new IOException("EOF");
-                if (len == 0)
-                {
-                    // still full.  need to  block until it is writable.
-                    if (selector==null)
-                     {
-                            selector = Selector.open();
-                            channel.register(selector, SelectionKey.OP_WRITE);
-                     }
+              {
+                // still full.  need to  block until it is writable.
+                if (selector==null)
+                  {
+                    selector = Selector.open();
+                    channel.register(selector, SelectionKey.OP_WRITE);
+                  }
 
-                     selector.select();
-                }
-            }
-        }
-        flush = null;
-    }
+                selector.select();
+              }
+          }
+      }
+    flush = null;
+  }
 
-    /* ------------------------------------------------------------------------------- */
-    public void destroy()
-    {
-        if (selector != null)
-        {
-            try{ selector.close();}
-            catch(IOException e){}
-            selector = null;
-            buffer = null;
-            flush = null;
-            channel = null;
-        }
-    }
+  /* ------------------------------------------------------------------------------- */
+  public void destroy()
+  {
+    if (selector != null)
+      {
+        try{ selector.close();}
+        catch(IOException e){}
+        selector = null;
+        buffer = null;
+        flush = null;
+        channel = null;
+      }
+  }
 }
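
SocketChannelOutputStream's flushBuffer above deals with a non-blocking channel whose write() may accept zero bytes: it yields, retries once, and only then blocks on a Selector registered for OP_WRITE until the channel becomes writable again. A compile-able sketch of that write-until-drained pattern on a channel already in non-blocking mode; FlushUtil is an illustrative name, and a real server would normally share one selector rather than opening it per call.

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SelectionKey;
import java.nio.channels.Selector;
import java.nio.channels.SocketChannel;

public class FlushUtil {
  /** Writes the whole buffer to a non-blocking channel, parking on OP_WRITE when it is full. */
  static void flushFully(SocketChannel channel, ByteBuffer buf) throws IOException {
    Selector selector = null;                // lazily opened, only if we ever have to wait
    try {
      while (buf.hasRemaining()) {
        int len = channel.write(buf);
        if (len < 0) throw new IOException("EOF");
        if (len == 0) {
          Thread.yield();                    // give other threads a chance to drain the peer
          len = channel.write(buf);
          if (len < 0) throw new IOException("EOF");
          if (len == 0) {                    // still full: block until the channel is writable
            if (selector == null) {
              selector = Selector.open();    // channel must already be configureBlocking(false)
              channel.register(selector, SelectionKey.OP_WRITE);
            }
            selector.select();
          }
        }
      }
    } finally {
      if (selector != null) selector.close();
    }
  }
}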

+ 7 - 7
src/java/org/apache/hadoop/mapred/Counters.java

@@ -144,7 +144,7 @@ public class Counters implements Writable {
       return groupCounters.size();
     }
     
-        /**
+    /**
      * Looks up key in the ResourceBundle and returns the corresponding value.
      * If the bundle or the key doesn't exist, returns the default value.
      */
@@ -167,7 +167,7 @@ public class Counters implements Writable {
   // Map from group name (enum class name) to map of int (enum ordinal) to
   // counter record (name-value pair).
   private Map<String,Map<Integer,CounterRec>> counters =
-          new TreeMap<String,Map<Integer,CounterRec>>();
+    new TreeMap<String,Map<Integer,CounterRec>>();
   
   /**
    * Returns the names of all counter classes.
@@ -287,11 +287,11 @@ public class Counters implements Writable {
    * Convenience method for computing the sum of two sets of counters.
    */
   public static Counters sum(Counters a, Counters b) {
-      Counters counters = new Counters();
-      counters.incrAllCounters(a);
-      counters.incrAllCounters(b);
-      return counters;
-    }
+    Counters counters = new Counters();
+    counters.incrAllCounters(a);
+    counters.incrAllCounters(b);
+    return counters;
+  }
   
   /**
    * Returns the total number of counters, by summing the number of counters

+ 3 - 3
src/java/org/apache/hadoop/mapred/InterTrackerProtocol.java

@@ -62,8 +62,8 @@ interface InterTrackerProtocol extends VersionedProtocol {
    *         fresh instructions.
    */
   HeartbeatResponse heartbeat(TaskTrackerStatus status, 
-          boolean initialContact, boolean acceptNewTasks, short responseId)
-  throws IOException;
+                              boolean initialContact, boolean acceptNewTasks, short responseId)
+    throws IOException;
 
   /**
    * The task tracker calls this once, to discern where it can find
@@ -92,7 +92,7 @@ interface InterTrackerProtocol extends VersionedProtocol {
    * @throws IOException
    */
   TaskCompletionEvent[] getTaskCompletionEvents(
-      String jobid, int fromEventId, int maxEvents) throws IOException;
+                                                String jobid, int fromEventId, int maxEvents) throws IOException;
   
 }
 

+ 655 - 655
src/java/org/apache/hadoop/mapred/JobClient.java

@@ -38,766 +38,766 @@ import java.util.*;
  * @author Mike Cafarella
  *******************************************************/
 public class JobClient extends ToolBase implements MRConstants  {
-    private static final Log LOG = LogFactory.getLog("org.apache.hadoop.mapred.JobClient");
-    public static enum TaskStatusFilter { NONE, FAILED, SUCCEEDED, ALL }
-    private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; 
-
-    static long MAX_JOBPROFILE_AGE = 1000 * 2;
+  private static final Log LOG = LogFactory.getLog("org.apache.hadoop.mapred.JobClient");
+  public static enum TaskStatusFilter { NONE, FAILED, SUCCEEDED, ALL }
+  private TaskStatusFilter taskOutputFilter = TaskStatusFilter.FAILED; 
+
+  static long MAX_JOBPROFILE_AGE = 1000 * 2;
+
+  /**
+   * A NetworkedJob is an implementation of RunningJob.  It holds
+   * a JobProfile object to provide some info, and interacts with the
+   * remote service to provide certain functionality.
+   */
+  class NetworkedJob implements RunningJob {
+    JobProfile profile;
+    JobStatus status;
+    long statustime;
 
     /**
-     * A NetworkedJob is an implementation of RunningJob.  It holds
-     * a JobProfile object to provide some info, and interacts with the
-     * remote service to provide certain functionality.
+     * We store a JobProfile and a timestamp for when we last
+     * acquired the job profile.  If the job is null, then we cannot
+     * perform any of the tasks.  The job might be null if the JobTracker
+     * has completely forgotten about the job.  (eg, 24 hours after the
+     * job completes.)
      */
-    class NetworkedJob implements RunningJob {
-        JobProfile profile;
-        JobStatus status;
-        long statustime;
-
-        /**
-         * We store a JobProfile and a timestamp for when we last
-         * acquired the job profile.  If the job is null, then we cannot
-         * perform any of the tasks.  The job might be null if the JobTracker
-         * has completely forgotten about the job.  (eg, 24 hours after the
-         * job completes.)
-         */
-        public NetworkedJob(JobStatus job) throws IOException {
-            this.status = job;
-            this.profile = jobSubmitClient.getJobProfile(job.getJobId());
-            this.statustime = System.currentTimeMillis();
-        }
-
-        /**
-         * Some methods rely on having a recent job profile object.  Refresh
-         * it, if necessary
-         */
-        synchronized void ensureFreshStatus() throws IOException {
-            if (System.currentTimeMillis() - statustime > MAX_JOBPROFILE_AGE) {
-                this.status = jobSubmitClient.getJobStatus(profile.getJobId());
-                this.statustime = System.currentTimeMillis();
-            }
-        }
-
-        /**
-         * An identifier for the job
-         */
-        public String getJobID() {
-            return profile.getJobId();
-        }
-
-        /**
-         * The name of the job file
-         */
-        public String getJobFile() {
-            return profile.getJobFile();
-        }
-
-        /**
-         * A URL where the job's status can be seen
-         */
-        public String getTrackingURL() {
-            return profile.getURL().toString();
-        }
-
-        /**
-         * A float between 0.0 and 1.0, indicating the % of map work
-         * completed.
-         */
-        public float mapProgress() throws IOException {
-            ensureFreshStatus();
-            return status.mapProgress();
-        }
-
-        /**
-         * A float between 0.0 and 1.0, indicating the % of reduce work
-         * completed.
-         */
-        public float reduceProgress() throws IOException {
-            ensureFreshStatus();
-            return status.reduceProgress();
-        }
-
-        /**
-         * Returns immediately whether the whole job is done yet or not.
-         */
-        public synchronized boolean isComplete() throws IOException {
-            ensureFreshStatus();
-            return (status.getRunState() == JobStatus.SUCCEEDED ||
-                    status.getRunState() == JobStatus.FAILED);
-        }
-
-        /**
-         * True iff job completed successfully.
-         */
-        public synchronized boolean isSuccessful() throws IOException {
-            ensureFreshStatus();
-            return status.getRunState() == JobStatus.SUCCEEDED;
-        }
-
-        /**
-         * Blocks until the job is finished
-         */
-        public synchronized void waitForCompletion() throws IOException {
-            while (! isComplete()) {
-                try {
-                    Thread.sleep(5000);
-                } catch (InterruptedException ie) {
-                }
-            }
-        }
+    public NetworkedJob(JobStatus job) throws IOException {
+      this.status = job;
+      this.profile = jobSubmitClient.getJobProfile(job.getJobId());
+      this.statustime = System.currentTimeMillis();
+    }
 
-        /**
-         * Tells the service to terminate the current job.
-         */
-        public synchronized void killJob() throws IOException {
-            jobSubmitClient.killJob(getJobID());
-        }
-        /**
-         * Fetch task completion events from jobtracker for this job. 
-         */
-        public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
-            int startFrom) throws IOException{
-          return jobSubmitClient.getTaskCompletionEvents(
-              getJobID(), startFrom, 10); 
-        }
+    /**
+     * Some methods rely on having a recent job profile object.  Refresh
+     * it, if necessary
+     */
+    synchronized void ensureFreshStatus() throws IOException {
+      if (System.currentTimeMillis() - statustime > MAX_JOBPROFILE_AGE) {
+        this.status = jobSubmitClient.getJobStatus(profile.getJobId());
+        this.statustime = System.currentTimeMillis();
+      }
+    }
 
-        /**
-         * Dump stats to screen
-         */
-        public String toString() {
-            try {
-                ensureFreshStatus();
-            } catch (IOException e) {
-            }
-            return "Job: " + profile.getJobId() + "\n" + 
-                "file: " + profile.getJobFile() + "\n" + 
-                "tracking URL: " + profile.getURL() + "\n" + 
-                "map() completion: " + status.mapProgress() + "\n" + 
-                "reduce() completion: " + status.reduceProgress();
-        }
-        
-        /**
-         * Returns the counters for this job
-         */
-        public Counters getCounters() throws IOException {
-          return jobSubmitClient.getJobCounters(getJobID());
-        }
+    /**
+     * An identifier for the job
+     */
+    public String getJobID() {
+      return profile.getJobId();
     }
 
-    JobSubmissionProtocol jobSubmitClient;
-    FileSystem fs = null;
+    /**
+     * The name of the job file
+     */
+    public String getJobFile() {
+      return profile.getJobFile();
+    }
 
-    static Random r = new Random();
+    /**
+     * A URL where the job's status can be seen
+     */
+    public String getTrackingURL() {
+      return profile.getURL().toString();
+    }
 
     /**
-     * Build a job client, connect to the default job tracker
+     * A float between 0.0 and 1.0, indicating the % of map work
+     * completed.
      */
-    public JobClient() {
+    public float mapProgress() throws IOException {
+      ensureFreshStatus();
+      return status.mapProgress();
     }
-    
-    public JobClient(Configuration conf) throws IOException {
-        setConf(conf);
-        init();
+
+    /**
+     * A float between 0.0 and 1.0, indicating the % of reduce work
+     * completed.
+     */
+    public float reduceProgress() throws IOException {
+      ensureFreshStatus();
+      return status.reduceProgress();
     }
-    
-    public void init() throws IOException {
-        String tracker = conf.get("mapred.job.tracker", "local");
-        if ("local".equals(tracker)) {
-          this.jobSubmitClient = new LocalJobRunner(conf);
-        } else {
-          this.jobSubmitClient = (JobSubmissionProtocol) 
-            RPC.getProxy(JobSubmissionProtocol.class,
-                         JobSubmissionProtocol.versionID,
-                         JobTracker.getAddress(conf), conf);
-        }        
+
+    /**
+     * Returns immediately whether the whole job is done yet or not.
+     */
+    public synchronized boolean isComplete() throws IOException {
+      ensureFreshStatus();
+      return (status.getRunState() == JobStatus.SUCCEEDED ||
+              status.getRunState() == JobStatus.FAILED);
     }
-  
+
     /**
-     * Build a job client, connect to the indicated job tracker.
+     * True iff job completed successfully.
      */
-    public JobClient(InetSocketAddress jobTrackAddr, Configuration conf) throws IOException {
-        this.jobSubmitClient = (JobSubmissionProtocol) 
-            RPC.getProxy(JobSubmissionProtocol.class,
-                         JobSubmissionProtocol.versionID, jobTrackAddr, conf);
+    public synchronized boolean isSuccessful() throws IOException {
+      ensureFreshStatus();
+      return status.getRunState() == JobStatus.SUCCEEDED;
     }
 
+    /**
+     * Blocks until the job is finished
+     */
+    public synchronized void waitForCompletion() throws IOException {
+      while (! isComplete()) {
+        try {
+          Thread.sleep(5000);
+        } catch (InterruptedException ie) {
+        }
+      }
+    }
 
     /**
+     * Tells the service to terminate the current job.
+     */
+    public synchronized void killJob() throws IOException {
+      jobSubmitClient.killJob(getJobID());
+    }
+    /**
+     * Fetch task completion events from jobtracker for this job. 
      */
-    public synchronized void close() throws IOException {
+    public synchronized TaskCompletionEvent[] getTaskCompletionEvents(
+                                                                      int startFrom) throws IOException{
+      return jobSubmitClient.getTaskCompletionEvents(
+                                                     getJobID(), startFrom, 10); 
     }
 
     /**
-     * Get a filesystem handle.  We need this to prepare jobs
-     * for submission to the MapReduce system.
+     * Dump stats to screen
      */
-    public synchronized FileSystem getFs() throws IOException {
-      if (this.fs == null) {
-        String fsName = jobSubmitClient.getFilesystemName();
-        this.fs = FileSystem.getNamed(fsName, this.conf);
+    public String toString() {
+      try {
+        ensureFreshStatus();
+      } catch (IOException e) {
       }
-      return fs;
+      return "Job: " + profile.getJobId() + "\n" + 
+        "file: " + profile.getJobFile() + "\n" + 
+        "tracking URL: " + profile.getURL() + "\n" + 
+        "map() completion: " + status.mapProgress() + "\n" + 
+        "reduce() completion: " + status.reduceProgress();
     }
-
+        
     /**
-     * Submit a job to the MR system
+     * Returns the counters for this job
      */
-    public RunningJob submitJob(String jobFile) throws FileNotFoundException, 
-      InvalidJobConfException,IOException {
-        // Load in the submitted job details
-        JobConf job = new JobConf(jobFile);
-        return submitJob(job);
+    public Counters getCounters() throws IOException {
+      return jobSubmitClient.getJobCounters(getJobID());
     }
+  }
+
+  JobSubmissionProtocol jobSubmitClient;
+  FileSystem fs = null;
+
+  static Random r = new Random();
+
+  /**
+   * Build a job client, connect to the default job tracker
+   */
+  public JobClient() {
+  }
+    
+  public JobClient(Configuration conf) throws IOException {
+    setConf(conf);
+    init();
+  }
+    
+  public void init() throws IOException {
+    String tracker = conf.get("mapred.job.tracker", "local");
+    if ("local".equals(tracker)) {
+      this.jobSubmitClient = new LocalJobRunner(conf);
+    } else {
+      this.jobSubmitClient = (JobSubmissionProtocol) 
+        RPC.getProxy(JobSubmissionProtocol.class,
+                     JobSubmissionProtocol.versionID,
+                     JobTracker.getAddress(conf), conf);
+    }        
+  }
+  
+  /**
+   * Build a job client, connect to the indicated job tracker.
+   */
+  public JobClient(InetSocketAddress jobTrackAddr, Configuration conf) throws IOException {
+    this.jobSubmitClient = (JobSubmissionProtocol) 
+      RPC.getProxy(JobSubmissionProtocol.class,
+                   JobSubmissionProtocol.versionID, jobTrackAddr, conf);
+  }
+
+
+  /**
+   */
+  public synchronized void close() throws IOException {
+  }
+
+  /**
+   * Get a filesystem handle.  We need this to prepare jobs
+   * for submission to the MapReduce system.
+   */
+  public synchronized FileSystem getFs() throws IOException {
+    if (this.fs == null) {
+      String fsName = jobSubmitClient.getFilesystemName();
+      this.fs = FileSystem.getNamed(fsName, this.conf);
+    }
+    return fs;
+  }
+
+  /**
+   * Submit a job to the MR system
+   */
+  public RunningJob submitJob(String jobFile) throws FileNotFoundException, 
+                                                     InvalidJobConfException,IOException {
+    // Load in the submitted job details
+    JobConf job = new JobConf(jobFile);
+    return submitJob(job);
+  }
     
    
-    /**
-     * Submit a job to the MR system
-     */
-    public RunningJob submitJob(JobConf job) throws FileNotFoundException, 
-      InvalidJobConfException, IOException {
-        //
-        // First figure out what fs the JobTracker is using.  Copy the
-        // job to it, under a temporary name.  This allows DFS to work,
-        // and under the local fs also provides UNIX-like object loading 
-        // semantics.  (that is, if the job file is deleted right after
-        // submission, we can still run the submission to completion)
-        //
-
-        // Create a number of filenames in the JobTracker's fs namespace
-        Path submitJobDir = new Path(job.getSystemDir(), "submit_" + Integer.toString(Math.abs(r.nextInt()), 36));
-        Path submitJobFile = new Path(submitJobDir, "job.xml");
-        Path submitJarFile = new Path(submitJobDir, "job.jar");
-        Path submitSplitFile = new Path(submitJobDir, "job.split");
+  /**
+   * Submit a job to the MR system
+   */
+  public RunningJob submitJob(JobConf job) throws FileNotFoundException, 
+                                                  InvalidJobConfException, IOException {
+    //
+    // First figure out what fs the JobTracker is using.  Copy the
+    // job to it, under a temporary name.  This allows DFS to work,
+    // and under the local fs also provides UNIX-like object loading 
+    // semantics.  (that is, if the job file is deleted right after
+    // submission, we can still run the submission to completion)
+    //
+
+    // Create a number of filenames in the JobTracker's fs namespace
+    Path submitJobDir = new Path(job.getSystemDir(), "submit_" + Integer.toString(Math.abs(r.nextInt()), 36));
+    Path submitJobFile = new Path(submitJobDir, "job.xml");
+    Path submitJarFile = new Path(submitJobDir, "job.jar");
+    Path submitSplitFile = new Path(submitJobDir, "job.split");
         
-        FileSystem fs = getFs();
-        LOG.debug("default FileSystem: " + fs.getUri());
-        // try getting the md5 of the archives
-        URI[] tarchives = DistributedCache.getCacheArchives(job);
-        URI[] tfiles = DistributedCache.getCacheFiles(job);
-        if ((tarchives != null) || (tfiles != null)) {
-          // prepare these archives for md5 checksums
-          if (tarchives != null) {
-            String md5Archives = StringUtils.byteToHexString(DistributedCache
-                .createMD5(tarchives[0], job));
-            for (int i = 1; i < tarchives.length; i++) {
-              md5Archives = md5Archives
-                  + ","
-                  + StringUtils.byteToHexString(DistributedCache
-                      .createMD5(tarchives[i], job));
-            }
-            DistributedCache.setArchiveMd5(job, md5Archives);
-            //job.set("mapred.cache.archivemd5", md5Archives);
-          }
-          if (tfiles != null) {
-            String md5Files = StringUtils.byteToHexString(DistributedCache
-                .createMD5(tfiles[0], job));
-            for (int i = 1; i < tfiles.length; i++) {
-              md5Files = md5Files
-                  + ","
-                  + StringUtils.byteToHexString(DistributedCache
-                      .createMD5(tfiles[i], job));
-            }
-            DistributedCache.setFileMd5(job, md5Files);
-            //"mapred.cache.filemd5", md5Files);
-          }
+    FileSystem fs = getFs();
+    LOG.debug("default FileSystem: " + fs.getUri());
+    // try getting the md5 of the archives
+    URI[] tarchives = DistributedCache.getCacheArchives(job);
+    URI[] tfiles = DistributedCache.getCacheFiles(job);
+    if ((tarchives != null) || (tfiles != null)) {
+      // prepare these archives for md5 checksums
+      if (tarchives != null) {
+        String md5Archives = StringUtils.byteToHexString(DistributedCache
+                                                         .createMD5(tarchives[0], job));
+        for (int i = 1; i < tarchives.length; i++) {
+          md5Archives = md5Archives
+            + ","
+            + StringUtils.byteToHexString(DistributedCache
+                                          .createMD5(tarchives[i], job));
+        }
+        DistributedCache.setArchiveMd5(job, md5Archives);
+        //job.set("mapred.cache.archivemd5", md5Archives);
+      }
+      if (tfiles != null) {
+        String md5Files = StringUtils.byteToHexString(DistributedCache
+                                                      .createMD5(tfiles[0], job));
+        for (int i = 1; i < tfiles.length; i++) {
+          md5Files = md5Files
+            + ","
+            + StringUtils.byteToHexString(DistributedCache
+                                          .createMD5(tfiles[i], job));
         }
+        DistributedCache.setFileMd5(job, md5Files);
+        //"mapred.cache.filemd5", md5Files);
+      }
+    }
        
-        String originalJarPath = job.getJar();
-        short replication = (short)job.getInt("mapred.submit.replication", 10);
+    String originalJarPath = job.getJar();
+    short replication = (short)job.getInt("mapred.submit.replication", 10);
 
-        if (originalJarPath != null) {           // copy jar to JobTracker's fs
-          // use jar name if job is not named. 
-          if( "".equals(job.getJobName() )){
-            job.setJobName(new Path(originalJarPath).getName());
-          }
-          job.setJar(submitJarFile.toString());
-          fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);
-          fs.setReplication(submitJarFile, replication);
-        }
+    if (originalJarPath != null) {           // copy jar to JobTracker's fs
+      // use jar name if job is not named. 
+      if( "".equals(job.getJobName() )){
+        job.setJobName(new Path(originalJarPath).getName());
+      }
+      job.setJar(submitJarFile.toString());
+      fs.copyFromLocalFile(new Path(originalJarPath), submitJarFile);
+      fs.setReplication(submitJarFile, replication);
+    }
 
-        // Set the user's name and working directory
-        String user = System.getProperty("user.name");
-        job.setUser(user != null ? user : "Dr Who");
-        if (job.getWorkingDirectory() == null) {
-          job.setWorkingDirectory(fs.getWorkingDirectory());          
-        }
+    // Set the user's name and working directory
+    String user = System.getProperty("user.name");
+    job.setUser(user != null ? user : "Dr Who");
+    if (job.getWorkingDirectory() == null) {
+      job.setWorkingDirectory(fs.getWorkingDirectory());          
+    }
 
-        // Check the input specification 
-        job.getInputFormat().validateInput(job);
-
-        // Check the output specification
-        job.getOutputFormat().checkOutputSpecs(fs, job);
-
-        // Create the splits for the job
-        LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile));
-        InputSplit[] splits = 
-          job.getInputFormat().getSplits(job, job.getNumMapTasks());
-        // sort the splits into order based on size, so that the biggest
-        // go first
-        Arrays.sort(splits, new Comparator() {
-          public int compare(Object a, Object b) {
-            try {
-              long left = ((InputSplit) a).getLength();
-              long right = ((InputSplit) b).getLength();
-              if (left == right) {
-                return 0;
-              } else if (left < right) {
-                return 1;
-              } else {
-                return -1;
-              }
-            } catch (IOException ie) {
-              throw new RuntimeException("Problem getting input split size",
-                                         ie);
+    // Check the input specification 
+    job.getInputFormat().validateInput(job);
+
+    // Check the output specification
+    job.getOutputFormat().checkOutputSpecs(fs, job);
+
+    // Create the splits for the job
+    LOG.debug("Creating splits at " + fs.makeQualified(submitSplitFile));
+    InputSplit[] splits = 
+      job.getInputFormat().getSplits(job, job.getNumMapTasks());
+    // sort the splits into order based on size, so that the biggest
+    // go first
+    Arrays.sort(splits, new Comparator() {
+        public int compare(Object a, Object b) {
+          try {
+            long left = ((InputSplit) a).getLength();
+            long right = ((InputSplit) b).getLength();
+            if (left == right) {
+              return 0;
+            } else if (left < right) {
+              return 1;
+            } else {
+              return -1;
             }
+          } catch (IOException ie) {
+            throw new RuntimeException("Problem getting input split size",
+                                       ie);
           }
-        });
-        // write the splits to a file for the job tracker
-        FSDataOutputStream out = fs.create(submitSplitFile);
-        try {
-          writeSplitsFile(splits, out);
-        } finally {
-          out.close();
         }
-        job.set("mapred.job.split.file", submitSplitFile.toString());
-        job.setNumMapTasks(splits.length);
+      });
+    // write the splits to a file for the job tracker
+    FSDataOutputStream out = fs.create(submitSplitFile);
+    try {
+      writeSplitsFile(splits, out);
+    } finally {
+      out.close();
+    }
+    job.set("mapred.job.split.file", submitSplitFile.toString());
+    job.setNumMapTasks(splits.length);
         
-        // Write job file to JobTracker's fs        
-        out = fs.create(submitJobFile, replication);
-        try {
-          job.write(out);
-        } finally {
-          out.close();
-        }
+    // Write job file to JobTracker's fs        
+    out = fs.create(submitJobFile, replication);
+    try {
+      job.write(out);
+    } finally {
+      out.close();
+    }
 
-        //
-        // Now, actually submit the job (using the submit name)
-        //
-        JobStatus status = jobSubmitClient.submitJob(submitJobFile.toString());
-        if (status != null) {
-            return new NetworkedJob(status);
-        } else {
-            throw new IOException("Could not launch job");
-        }
+    //
+    // Now, actually submit the job (using the submit name)
+    //
+    JobStatus status = jobSubmitClient.submitJob(submitJobFile.toString());
+    if (status != null) {
+      return new NetworkedJob(status);
+    } else {
+      throw new IOException("Could not launch job");
     }
+  }
 
-    static class RawSplit implements Writable {
-      private String splitClass;
-      private BytesWritable bytes = new BytesWritable();
-      private String[] locations;
+  static class RawSplit implements Writable {
+    private String splitClass;
+    private BytesWritable bytes = new BytesWritable();
+    private String[] locations;
       
-      public void setBytes(byte[] data, int offset, int length) {
-        bytes.set(data, offset, length);
-      }
+    public void setBytes(byte[] data, int offset, int length) {
+      bytes.set(data, offset, length);
+    }
 
-      public void setClassName(String className) {
-        splitClass = className;
-      }
+    public void setClassName(String className) {
+      splitClass = className;
+    }
       
-      public String getClassName() {
-        return splitClass;
-      }
+    public String getClassName() {
+      return splitClass;
+    }
       
-      public BytesWritable getBytes() {
-        return bytes;
-      }
+    public BytesWritable getBytes() {
+      return bytes;
+    }
       
-      public void setLocations(String[] locations) {
-        this.locations = locations;
-      }
+    public void setLocations(String[] locations) {
+      this.locations = locations;
+    }
       
-      public String[] getLocations() {
-        return locations;
-      }
+    public String[] getLocations() {
+      return locations;
+    }
       
-      public void readFields(DataInput in) throws IOException {
-        splitClass = Text.readString(in);
-        bytes.readFields(in);
-        int len = WritableUtils.readVInt(in);
-        locations = new String[len];
-        for(int i=0; i < len; ++i) {
-          locations[i] = Text.readString(in);
-        }
+    public void readFields(DataInput in) throws IOException {
+      splitClass = Text.readString(in);
+      bytes.readFields(in);
+      int len = WritableUtils.readVInt(in);
+      locations = new String[len];
+      for(int i=0; i < len; ++i) {
+        locations[i] = Text.readString(in);
       }
+    }
       
-      public void write(DataOutput out) throws IOException {
-        Text.writeString(out, splitClass);
-        bytes.write(out);
-        WritableUtils.writeVInt(out, locations.length);
-        for(int i = 0; i < locations.length; i++) {
-          Text.writeString(out, locations[i]);
-        }        
-      }
+    public void write(DataOutput out) throws IOException {
+      Text.writeString(out, splitClass);
+      bytes.write(out);
+      WritableUtils.writeVInt(out, locations.length);
+      for(int i = 0; i < locations.length; i++) {
+        Text.writeString(out, locations[i]);
+      }        
     }
+  }
     
-    private static final int CURRENT_SPLIT_FILE_VERSION = 0;
-    private static final byte[] SPLIT_FILE_HEADER = "SPL".getBytes();
+  private static final int CURRENT_SPLIT_FILE_VERSION = 0;
+  private static final byte[] SPLIT_FILE_HEADER = "SPL".getBytes();
     
-    /** Create the list of input splits and write them out in a file for
-     *the JobTracker. The format is:
-     * <format version>
-     * <numSplits>
-     * for each split:
-     *    <RawSplit>
-     * @param splits the input splits to write out
-     * @param out the stream to write to
-     */
-    private void writeSplitsFile(InputSplit[] splits, FSDataOutputStream out) throws IOException {
-      out.write(SPLIT_FILE_HEADER);
-      WritableUtils.writeVInt(out, CURRENT_SPLIT_FILE_VERSION);
-      WritableUtils.writeVInt(out, splits.length);
-      DataOutputBuffer buffer = new DataOutputBuffer();
-      RawSplit rawSplit = new RawSplit();
-      for(InputSplit split: splits) {
-        rawSplit.setClassName(split.getClass().getName());
-        buffer.reset();
-        split.write(buffer);
-        rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
-        rawSplit.setLocations(split.getLocations());
-        rawSplit.write(out);
-      }
+  /** Create the list of input splits and write them out in a file for
+   * the JobTracker. The format is:
+   * <format version>
+   * <numSplits>
+   * for each split:
+   *    <RawSplit>
+   * @param splits the input splits to write out
+   * @param out the stream to write to
+   */
+  private void writeSplitsFile(InputSplit[] splits, FSDataOutputStream out) throws IOException {
+    out.write(SPLIT_FILE_HEADER);
+    WritableUtils.writeVInt(out, CURRENT_SPLIT_FILE_VERSION);
+    WritableUtils.writeVInt(out, splits.length);
+    DataOutputBuffer buffer = new DataOutputBuffer();
+    RawSplit rawSplit = new RawSplit();
+    for(InputSplit split: splits) {
+      rawSplit.setClassName(split.getClass().getName());
+      buffer.reset();
+      split.write(buffer);
+      rawSplit.setBytes(buffer.getData(), 0, buffer.getLength());
+      rawSplit.setLocations(split.getLocations());
+      rawSplit.write(out);
     }
-
-    /**
-     * Read a splits file into a list of raw splits
-     * @param in the stream to read from
-     * @return the complete list of splits
-     * @throws IOException
-     */
-    static RawSplit[] readSplitFile(DataInput in) throws IOException {
-      byte[] header = new byte[SPLIT_FILE_HEADER.length];
-      in.readFully(header);
-      if (!Arrays.equals(SPLIT_FILE_HEADER, header)) {
-        throw new IOException("Invalid header on split file");
-      }
-      int vers = WritableUtils.readVInt(in);
-      if (vers != CURRENT_SPLIT_FILE_VERSION) {
-        throw new IOException("Unsupported split version " + vers);
-      }
-      int len = WritableUtils.readVInt(in);
-      RawSplit[] result = new RawSplit[len];
-      for(int i=0; i < len; ++i) {
-        result[i] = new RawSplit();
-        result[i].readFields(in);
-      }
-      return result;
+  }
+
+  /**
+   * Read a splits file into a list of raw splits
+   * @param in the stream to read from
+   * @return the complete list of splits
+   * @throws IOException
+   */
+  static RawSplit[] readSplitFile(DataInput in) throws IOException {
+    byte[] header = new byte[SPLIT_FILE_HEADER.length];
+    in.readFully(header);
+    if (!Arrays.equals(SPLIT_FILE_HEADER, header)) {
+      throw new IOException("Invalid header on split file");
     }
-    
-    /**
-     * Get an RunningJob object to track an ongoing job.  Returns
-     * null if the id does not correspond to any known job.
-     */
-    public RunningJob getJob(String jobid) throws IOException {
-        JobStatus status = jobSubmitClient.getJobStatus(jobid);
-        if (status != null) {
-            return new NetworkedJob(status);
-        } else {
-            return null;
-        }
+    int vers = WritableUtils.readVInt(in);
+    if (vers != CURRENT_SPLIT_FILE_VERSION) {
+      throw new IOException("Unsupported split version " + vers);
     }
-
-    /**
-     * Get the information of the current state of the map tasks of a job.
-     * @param jobId the job to query
-     * @return the list of all of the map tips
-     */
-    public TaskReport[] getMapTaskReports(String jobId) throws IOException {
-      return jobSubmitClient.getMapTaskReports(jobId);
+    int len = WritableUtils.readVInt(in);
+    RawSplit[] result = new RawSplit[len];
+    for(int i=0; i < len; ++i) {
+      result[i] = new RawSplit();
+      result[i].readFields(in);
     }
+    return result;
+  }
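
A minimal sketch of the job.split layout that writeSplitsFile and readSplitFile above agree on; it is illustrative only and not part of this patch. It uses only io classes already visible in this diff and writes to an in-memory buffer rather than the JobTracker filesystem; the split class name and hostname are placeholder values.

    import java.io.IOException;
    import org.apache.hadoop.io.BytesWritable;
    import org.apache.hadoop.io.DataOutputBuffer;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.io.WritableUtils;

    public class SplitFileLayoutSketch {
      public static void main(String[] args) throws IOException {
        DataOutputBuffer out = new DataOutputBuffer();
        out.write("SPL".getBytes());                 // SPLIT_FILE_HEADER
        WritableUtils.writeVInt(out, 0);             // CURRENT_SPLIT_FILE_VERSION
        WritableUtils.writeVInt(out, 1);             // number of splits that follow
        // Each RawSplit is: class name, opaque split bytes, then its location list.
        Text.writeString(out, "org.apache.hadoop.mapred.FileSplit");  // placeholder class name
        new BytesWritable().write(out);              // normally filled via split.write(buffer)
        WritableUtils.writeVInt(out, 1);             // one location
        Text.writeString(out, "host1.example.com");  // placeholder hostname
        System.out.println("sketch split file occupies " + out.getLength() + " bytes");
      }
    }
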
     
-    /**
-     * Get the information of the current state of the reduce tasks of a job.
-     * @param jobId the job to query
-     * @return the list of all of the map tips
-     */    
-    public TaskReport[] getReduceTaskReports(String jobId) throws IOException {
-      return jobSubmitClient.getReduceTaskReports(jobId);
+  /**
+   * Get a RunningJob object to track an ongoing job.  Returns
+   * null if the id does not correspond to any known job.
+   */
+  public RunningJob getJob(String jobid) throws IOException {
+    JobStatus status = jobSubmitClient.getJobStatus(jobid);
+    if (status != null) {
+      return new NetworkedJob(status);
+    } else {
+      return null;
     }
+  }
+
+  /**
+   * Get the information of the current state of the map tasks of a job.
+   * @param jobId the job to query
+   * @return the list of all of the map tips
+   */
+  public TaskReport[] getMapTaskReports(String jobId) throws IOException {
+    return jobSubmitClient.getMapTaskReports(jobId);
+  }
     
-    public ClusterStatus getClusterStatus() throws IOException {
-      return jobSubmitClient.getClusterStatus();
-    }
+  /**
+   * Get the information of the current state of the reduce tasks of a job.
+   * @param jobId the job to query
+   * @return the list of all of the reduce tips
+   */    
+  public TaskReport[] getReduceTaskReports(String jobId) throws IOException {
+    return jobSubmitClient.getReduceTaskReports(jobId);
+  }
     
-    public JobStatus[] jobsToComplete() throws IOException {
-      return jobSubmitClient.jobsToComplete();
-    }
+  public ClusterStatus getClusterStatus() throws IOException {
+    return jobSubmitClient.getClusterStatus();
+  }
     
-    /** Utility that submits a job, then polls for progress until the job is
-     * complete. */
-    public static void runJob(JobConf job) throws IOException {
-      JobClient jc = new JobClient(job);
-      boolean error = true;
-      RunningJob running = null;
-      String lastReport = null;
-      final int MAX_RETRIES = 5;
-      int retries = MAX_RETRIES;
-      TaskStatusFilter filter;
-      try {
-        filter = getTaskOutputFilter(job);
-      } catch(IllegalArgumentException e) {
-        LOG.warn("Invalid Output filter : " + e.getMessage() + 
-        " Valid values are : NONE, FAILED, SUCCEEDED, ALL");
-        throw e;
-      }
-      try {
-        running = jc.submitJob(job);
-        String jobId = running.getJobID();
-        LOG.info("Running job: " + jobId);
-        int eventCounter = 0 ; 
+  public JobStatus[] jobsToComplete() throws IOException {
+    return jobSubmitClient.jobsToComplete();
+  }
+    
+  /** Utility that submits a job, then polls for progress until the job is
+   * complete. */
+  public static void runJob(JobConf job) throws IOException {
+    JobClient jc = new JobClient(job);
+    boolean error = true;
+    RunningJob running = null;
+    String lastReport = null;
+    final int MAX_RETRIES = 5;
+    int retries = MAX_RETRIES;
+    TaskStatusFilter filter;
+    try {
+      filter = getTaskOutputFilter(job);
+    } catch(IllegalArgumentException e) {
+      LOG.warn("Invalid Output filter : " + e.getMessage() + 
+               " Valid values are : NONE, FAILED, SUCCEEDED, ALL");
+      throw e;
+    }
+    try {
+      running = jc.submitJob(job);
+      String jobId = running.getJobID();
+      LOG.info("Running job: " + jobId);
+      int eventCounter = 0 ; 
         
-        while (true) {
-          try {
-            Thread.sleep(1000);
-          } catch (InterruptedException e) {}
-          try {
-            if (running.isComplete()) {
-              break;
-            }
-            running = jc.getJob(jobId);
-            String report = 
-              (" map " + StringUtils.formatPercent(running.mapProgress(), 0)+
-               " reduce " + 
-               StringUtils.formatPercent(running.reduceProgress(), 0));
-            if (!report.equals(lastReport)) {
-              LOG.info(report);
-              lastReport = report;
-            }
+      while (true) {
+        try {
+          Thread.sleep(1000);
+        } catch (InterruptedException e) {}
+        try {
+          if (running.isComplete()) {
+            break;
+          }
+          running = jc.getJob(jobId);
+          String report = 
+            (" map " + StringUtils.formatPercent(running.mapProgress(), 0)+
+             " reduce " + 
+             StringUtils.formatPercent(running.reduceProgress(), 0));
+          if (!report.equals(lastReport)) {
+            LOG.info(report);
+            lastReport = report;
+          }
             
-            if( filter  != TaskStatusFilter.NONE){
-              TaskCompletionEvent[] events = 
-                running.getTaskCompletionEvents(eventCounter); 
-              eventCounter += events.length ;
-              for(TaskCompletionEvent event : events ){
-                switch( filter ){
-                case SUCCEEDED:
-                  if( event.getTaskStatus() == 
+          if( filter  != TaskStatusFilter.NONE){
+            TaskCompletionEvent[] events = 
+              running.getTaskCompletionEvents(eventCounter); 
+            eventCounter += events.length ;
+            for(TaskCompletionEvent event : events ){
+              switch( filter ){
+              case SUCCEEDED:
+                if( event.getTaskStatus() == 
                     TaskCompletionEvent.Status.SUCCEEDED){
-                    LOG.info(event.toString());
-                    displayTaskLogs(event.getTaskId(), event.getTaskTrackerHttp());
-                  }
-                  break; 
-                case FAILED:
-                  if( event.getTaskStatus() == 
+                  LOG.info(event.toString());
+                  displayTaskLogs(event.getTaskId(), event.getTaskTrackerHttp());
+                }
+                break; 
+              case FAILED:
+                if( event.getTaskStatus() == 
                     TaskCompletionEvent.Status.FAILED){
-                    LOG.info(event.toString());
-                    displayTaskLogs(event.getTaskId(), event.getTaskTrackerHttp());
-                  }
-                  break ; 
-                case ALL:
                   LOG.info(event.toString());
                   displayTaskLogs(event.getTaskId(), event.getTaskTrackerHttp());
-                  break;
                 }
+                break ; 
+              case ALL:
+                LOG.info(event.toString());
+                displayTaskLogs(event.getTaskId(), event.getTaskTrackerHttp());
+                break;
               }
             }
-            retries = MAX_RETRIES;
-          } catch (IOException ie) {
-            if (--retries == 0) {
-              LOG.warn("Final attempt failed, killing job.");
-              throw ie;
-            }
-            LOG.info("Communication problem with server: " +
-                     StringUtils.stringifyException(ie));
           }
+          retries = MAX_RETRIES;
+        } catch (IOException ie) {
+          if (--retries == 0) {
+            LOG.warn("Final attempt failed, killing job.");
+            throw ie;
+          }
+          LOG.info("Communication problem with server: " +
+                   StringUtils.stringifyException(ie));
         }
-        if (!running.isSuccessful()) {
-          throw new IOException("Job failed!");
-        }
-        LOG.info("Job complete: " + jobId);
-        running.getCounters().log(LOG);
-        error = false;
-      } finally {
-        if (error && (running != null)) {
-          running.killJob();
-        }
-        jc.close();
       }
-      
+      if (!running.isSuccessful()) {
+        throw new IOException("Job failed!");
+      }
+      LOG.info("Job complete: " + jobId);
+      running.getCounters().log(LOG);
+      error = false;
+    } finally {
+      if (error && (running != null)) {
+        running.killJob();
+      }
+      jc.close();
     }
+      
+  }
 
-    private static void displayTaskLogs(String taskId, String baseUrl)
+  private static void displayTaskLogs(String taskId, String baseUrl)
     throws IOException {
-      // The tasktracker for a 'failed/killed' job might not be around...
-      if (baseUrl != null) {
-        // Copy tasks's stdout of the JobClient
-        getTaskLogs(taskId, new URL(baseUrl+"&filter=stdout"), System.out);
+    // The tasktracker for a 'failed/killed' job might not be around...
+    if (baseUrl != null) {
+      // Copy task's stdout to stdout of the JobClient
+      getTaskLogs(taskId, new URL(baseUrl+"&filter=stdout"), System.out);
         
-        // Copy task's stderr to stderr of the JobClient 
-        getTaskLogs(taskId, new URL(baseUrl+"&filter=stderr"), System.err);
-      }
+      // Copy task's stderr to stderr of the JobClient 
+      getTaskLogs(taskId, new URL(baseUrl+"&filter=stderr"), System.err);
     }
+  }
     
-    private static void getTaskLogs(String taskId, URL taskLogUrl, 
-            OutputStream out) {
+  private static void getTaskLogs(String taskId, URL taskLogUrl, 
+                                  OutputStream out) {
+    try {
+      URLConnection connection = taskLogUrl.openConnection();
+      BufferedReader input = 
+        new BufferedReader(new InputStreamReader(connection.getInputStream()));
+      BufferedWriter output = 
+        new BufferedWriter(new OutputStreamWriter(out));
       try {
-        URLConnection connection = taskLogUrl.openConnection();
-        BufferedReader input = 
-          new BufferedReader(new InputStreamReader(connection.getInputStream()));
-        BufferedWriter output = 
-          new BufferedWriter(new OutputStreamWriter(out));
-        try {
-          String logData = null;
-          while ((logData = input.readLine()) != null) {
-            if (logData.length() > 0) {
-              output.write(taskId + ": " + logData + "\n");
-              output.flush();
-            }
+        String logData = null;
+        while ((logData = input.readLine()) != null) {
+          if (logData.length() > 0) {
+            output.write(taskId + ": " + logData + "\n");
+            output.flush();
           }
-        } finally {
-            input.close();
         }
-      }catch(IOException ioe){
-        LOG.warn("Error reading task output" + ioe.getMessage()); 
+      } finally {
+        input.close();
       }
-    }    
-
-    static Configuration getConfiguration(String jobTrackerSpec)
-    {
-      Configuration conf = new Configuration();
-      if(jobTrackerSpec != null) {        
-        if(jobTrackerSpec.indexOf(":") >= 0) {
-          conf.set("mapred.job.tracker", jobTrackerSpec);
-        } else {
-          String classpathFile = "hadoop-" + jobTrackerSpec + ".xml";
-          URL validate = conf.getResource(classpathFile);
-          if(validate == null) {
-            throw new RuntimeException(classpathFile + " not found on CLASSPATH");
-          }
-          conf.addFinalResource(classpathFile);
+    }catch(IOException ioe){
+      LOG.warn("Error reading task output" + ioe.getMessage()); 
+    }
+  }    
+
+  static Configuration getConfiguration(String jobTrackerSpec)
+  {
+    Configuration conf = new Configuration();
+    if(jobTrackerSpec != null) {        
+      if(jobTrackerSpec.indexOf(":") >= 0) {
+        conf.set("mapred.job.tracker", jobTrackerSpec);
+      } else {
+        String classpathFile = "hadoop-" + jobTrackerSpec + ".xml";
+        URL validate = conf.getResource(classpathFile);
+        if(validate == null) {
+          throw new RuntimeException(classpathFile + " not found on CLASSPATH");
         }
+        conf.addFinalResource(classpathFile);
       }
-      return conf;
     }
-
-    /**
-     * Sets the output filter for tasks. only those tasks are printed whose
-     * output matches the filter. 
-     * @param newValue task filter.
-     */
-    @Deprecated
+    return conf;
+  }
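
A small usage sketch, not part of this patch, of the two forms accepted by getConfiguration: a spec containing a colon is taken as a jobtracker address, anything else as the suffix of a hadoop-<name>.xml resource that must be on the classpath. The address and the "prod" profile name are hypothetical; the sketch must live in org.apache.hadoop.mapred because the helper is package-private.

    package org.apache.hadoop.mapred;

    import org.apache.hadoop.conf.Configuration;

    class GetConfigurationSketch {
      static Configuration byAddress() {
        // colon present: value becomes mapred.job.tracker
        return JobClient.getConfiguration("tracker.example.com:8012");
      }
      static Configuration byProfile() {
        // no colon: loads hadoop-prod.xml from the classpath as a final resource
        return JobClient.getConfiguration("prod");
      }
    }
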
+
+  /**
+   * Sets the output filter for tasks. Only those tasks are printed whose
+   * output matches the filter. 
+   * @param newValue task filter.
+   */
+  @Deprecated
     public void setTaskOutputFilter(TaskStatusFilter newValue){
-      this.taskOutputFilter = newValue ;
-    }
+    this.taskOutputFilter = newValue ;
+  }
     
-    /**
-     * Get the task output filter out of the JobConf
-     * @param job the JobConf to examine
-     * @return the filter level
-     */
-    public static TaskStatusFilter getTaskOutputFilter(JobConf job) {
-      return TaskStatusFilter.valueOf(job.get("jobclient.output.filter", 
-                                              "FAILED"));
-    }
+  /**
+   * Get the task output filter out of the JobConf
+   * @param job the JobConf to examine
+   * @return the filter level
+   */
+  public static TaskStatusFilter getTaskOutputFilter(JobConf job) {
+    return TaskStatusFilter.valueOf(job.get("jobclient.output.filter", 
+                                            "FAILED"));
+  }
     
-    /**
-     * Modify the JobConf to set the task output filter
-     * @param job the JobConf to modify
-     * @param newValue the value to set
-     */
-    public static void setTaskOutputFilter(JobConf job, 
-                                           TaskStatusFilter newValue) {
-      job.set("jobclient.output.filter", newValue.toString());
-    }
+  /**
+   * Modify the JobConf to set the task output filter
+   * @param job the JobConf to modify
+   * @param newValue the value to set
+   */
+  public static void setTaskOutputFilter(JobConf job, 
+                                         TaskStatusFilter newValue) {
+    job.set("jobclient.output.filter", newValue.toString());
+  }
     
-    /**
-     * Returns task output filter.
-     * @return task filter. 
-     */
-    @Deprecated
+  /**
+   * Returns task output filter.
+   * @return task filter. 
+   */
+  @Deprecated
     public TaskStatusFilter getTaskOutputFilter(){
-      return this.taskOutputFilter; 
-    }
+    return this.taskOutputFilter; 
+  }
     
-    public int run(String[] argv) throws Exception {
-        if (argv.length < 2) {
-            System.out.println("JobClient -submit <job> | -status <id> |" + 
-                               " -events <id> |" +
-                               " -kill <id> [-jt <jobtracker:port>|<config>]");
-            System.exit(-1);
-        }
+  public int run(String[] argv) throws Exception {
+    if (argv.length < 2) {
+      System.out.println("JobClient -submit <job> | -status <id> |" + 
+                         " -events <id> |" +
+                         " -kill <id> [-jt <jobtracker:port>|<config>]");
+      System.exit(-1);
+    }
 
-        // initialize JobClient
-        init();
+    // initialize JobClient
+    init();
         
-        // Process args
-        String submitJobFile = null;
-        String jobid = null;
-        boolean getStatus = false;
-        boolean killJob = false;
-
-        for (int i = 0; i < argv.length; i++) {
-            if ("-submit".equals(argv[i])) {
-                submitJobFile = argv[i+1];
-                i++;
-            } else if ("-status".equals(argv[i])) {
-                jobid = argv[i+1];
-                getStatus = true;
-                i++;
-            } else if ("-kill".equals(argv[i])) {
-                jobid = argv[i+1];
-                killJob = true;
-                i++;
-            } else if ("-events".equals(argv[i])) {
-              listEvents(argv[i+1], Integer.parseInt(argv[i+2]), 
-                         Integer.parseInt(argv[i+3]));
-              i += 3;
-            }
-        }
+    // Process args
+    String submitJobFile = null;
+    String jobid = null;
+    boolean getStatus = false;
+    boolean killJob = false;
+
+    for (int i = 0; i < argv.length; i++) {
+      if ("-submit".equals(argv[i])) {
+        submitJobFile = argv[i+1];
+        i++;
+      } else if ("-status".equals(argv[i])) {
+        jobid = argv[i+1];
+        getStatus = true;
+        i++;
+      } else if ("-kill".equals(argv[i])) {
+        jobid = argv[i+1];
+        killJob = true;
+        i++;
+      } else if ("-events".equals(argv[i])) {
+        listEvents(argv[i+1], Integer.parseInt(argv[i+2]), 
+                   Integer.parseInt(argv[i+3]));
+        i += 3;
+      }
+    }
 
-        // Submit the request
-        int exitCode = -1;
-        try {
-            if (submitJobFile != null) {
-                RunningJob job = submitJob(submitJobFile);
-                System.out.println("Created job " + job.getJobID());
-            } else if (getStatus) {
-                RunningJob job = getJob(jobid);
-                if (job == null) {
-                    System.out.println("Could not find job " + jobid);
-                } else {
-                    System.out.println();
-                    System.out.println(job);
-                    exitCode = 0;
-                }
-            } else if (killJob) {
-                RunningJob job = getJob(jobid);
-                if (job == null) {
-                    System.out.println("Could not find job " + jobid);
-                } else {
-                    job.killJob();
-                    System.out.println("Killed job " + jobid);
-                    exitCode = 0;
-                }
-            }
-        } finally {
-            close();
+    // Submit the request
+    int exitCode = -1;
+    try {
+      if (submitJobFile != null) {
+        RunningJob job = submitJob(submitJobFile);
+        System.out.println("Created job " + job.getJobID());
+      } else if (getStatus) {
+        RunningJob job = getJob(jobid);
+        if (job == null) {
+          System.out.println("Could not find job " + jobid);
+        } else {
+          System.out.println();
+          System.out.println(job);
+          exitCode = 0;
         }
-        return exitCode;
+      } else if (killJob) {
+        RunningJob job = getJob(jobid);
+        if (job == null) {
+          System.out.println("Could not find job " + jobid);
+        } else {
+          job.killJob();
+          System.out.println("Killed job " + jobid);
+          exitCode = 0;
+        }
+      }
+    } finally {
+      close();
     }
+    return exitCode;
+  }
     
-    /**
-     * List the events for the given job
-     * @param jobId the job id for the job's events to list
-     * @throws IOException
-     */
-    private void listEvents(String jobId, int fromEventId, int numEvents)
+  /**
+   * List the events for the given job
+   * @param jobId the job id for the job's events to list
+   * @throws IOException
+   */
+  private void listEvents(String jobId, int fromEventId, int numEvents)
     throws IOException {
-      TaskCompletionEvent[] events = 
-        jobSubmitClient.getTaskCompletionEvents(jobId, fromEventId, numEvents);
-      System.out.println("Task completion events for " + jobId);
-      System.out.println("Number of events (from " + fromEventId + 
-          ") are: " + events.length);
-      for(TaskCompletionEvent event: events) {
-        System.out.println(event.getTaskStatus() + " " + event.getTaskId() + 
-                           " " + event.getTaskTrackerHttp());
-      }
+    TaskCompletionEvent[] events = 
+      jobSubmitClient.getTaskCompletionEvents(jobId, fromEventId, numEvents);
+    System.out.println("Task completion events for " + jobId);
+    System.out.println("Number of events (from " + fromEventId + 
+                       ") are: " + events.length);
+    for(TaskCompletionEvent event: events) {
+      System.out.println(event.getTaskStatus() + " " + event.getTaskId() + 
+                         " " + event.getTaskTrackerHttp());
     }
+  }
     
-    /**
-     */
-    public static void main(String argv[]) throws Exception {
-        int res = new JobClient().doMain(new Configuration(), argv);
-        System.exit(res);
-    }
+  /**
+   * Command-line entry point for the job client.
+   */
+  public static void main(String argv[]) throws Exception {
+    int res = new JobClient().doMain(new Configuration(), argv);
+    System.exit(res);
+  }
 }
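
A driver sketch showing how the runJob and setTaskOutputFilter entry points above are typically reached in this era's API; it is illustrative only and not part of this patch. IdentityMapper and IdentityReducer are the stock pass-through classes from org.apache.hadoop.mapred.lib, and the JobConf(Class), setInputPath, and setOutputPath calls are assumed to exist as in the surrounding source tree; input and output paths come from the command line.

    import java.io.IOException;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.JobClient;
    import org.apache.hadoop.mapred.JobConf;
    import org.apache.hadoop.mapred.lib.IdentityMapper;
    import org.apache.hadoop.mapred.lib.IdentityReducer;

    public class RunJobSketch {
      public static void main(String[] args) throws IOException {
        JobConf job = new JobConf(RunJobSketch.class);
        job.setJobName("runjob-sketch");
        job.setInputPath(new Path(args[0]));
        job.setOutputPath(new Path(args[1]));
        job.setMapperClass(IdentityMapper.class);
        job.setReducerClass(IdentityReducer.class);
        job.setOutputKeyClass(LongWritable.class);
        job.setOutputValueClass(Text.class);
        // Print completion events for every task, not just failed ones.
        JobClient.setTaskOutputFilter(job, JobClient.TaskStatusFilter.ALL);
        // Submits the job, polls progress once a second, and throws
        // IOException("Job failed!") if the job does not succeed.
        JobClient.runJob(job);
      }
    }
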
 

+ 835 - 835
src/java/org/apache/hadoop/mapred/JobInProgress.java

@@ -46,912 +46,912 @@ import org.apache.hadoop.metrics.MetricsUtil;
 // doing bookkeeping of its Tasks.
 ///////////////////////////////////////////////////////
 class JobInProgress {
-    private static final Log LOG = LogFactory.getLog("org.apache.hadoop.mapred.JobInProgress");
+  private static final Log LOG = LogFactory.getLog("org.apache.hadoop.mapred.JobInProgress");
     
-    JobProfile profile;
-    JobStatus status;
-    Path localJobFile = null;
-    Path localJarFile = null;
-
-    TaskInProgress maps[] = new TaskInProgress[0];
-    TaskInProgress reduces[] = new TaskInProgress[0];
-    int numMapTasks = 0;
-    int numReduceTasks = 0;
-    int runningMapTasks = 0;
-    int runningReduceTasks = 0;
-    int finishedMapTasks = 0;
-    int finishedReduceTasks = 0;
-    int failedMapTasks = 0 ; 
-    int failedReduceTasks = 0 ; 
-    JobTracker jobtracker = null;
-    Map<String,List<TaskInProgress>> hostToMaps = new HashMap();
-    private int taskCompletionEventTracker = 0 ; 
-    List<TaskCompletionEvent> taskCompletionEvents ;
+  JobProfile profile;
+  JobStatus status;
+  Path localJobFile = null;
+  Path localJarFile = null;
+
+  TaskInProgress maps[] = new TaskInProgress[0];
+  TaskInProgress reduces[] = new TaskInProgress[0];
+  int numMapTasks = 0;
+  int numReduceTasks = 0;
+  int runningMapTasks = 0;
+  int runningReduceTasks = 0;
+  int finishedMapTasks = 0;
+  int finishedReduceTasks = 0;
+  int failedMapTasks = 0 ; 
+  int failedReduceTasks = 0 ; 
+  JobTracker jobtracker = null;
+  Map<String,List<TaskInProgress>> hostToMaps = new HashMap();
+  private int taskCompletionEventTracker = 0 ; 
+  List<TaskCompletionEvent> taskCompletionEvents ;
     
-    // The no. of tasktrackers where >= conf.getMaxTaskFailuresPerTracker()
-    // tasks have failed
-    private volatile int flakyTaskTrackers = 0;
-    // Map of trackerHostName -> no. of task failures
-    private Map<String, Integer> trackerToFailuresMap = 
-      new TreeMap<String, Integer>();
+  // The no. of tasktrackers where >= conf.getMaxTaskFailuresPerTracker()
+  // tasks have failed
+  private volatile int flakyTaskTrackers = 0;
+  // Map of trackerHostName -> no. of task failures
+  private Map<String, Integer> trackerToFailuresMap = 
+    new TreeMap<String, Integer>();
     
-    long startTime;
-    long finishTime;
+  long startTime;
+  long finishTime;
 
-    private JobConf conf;
-    boolean tasksInited = false;
+  private JobConf conf;
+  boolean tasksInited = false;
 
-    private LocalFileSystem localFs;
-    private String uniqueString;
+  private LocalFileSystem localFs;
+  private String uniqueString;
     
-    private Counters mapCounters = new Counters();
-    private Counters reduceCounters = new Counters();
-    private MetricsRecord jobMetrics;
+  private Counters mapCounters = new Counters();
+  private Counters reduceCounters = new Counters();
+  private MetricsRecord jobMetrics;
   
-    /**
-     * Create a JobInProgress with the given job file, plus a handle
-     * to the tracker.
-     */
-    public JobInProgress(String jobFile, JobTracker jobtracker, 
-                         Configuration default_conf) throws IOException {
-        uniqueString = jobtracker.createUniqueId();
-        String jobid = "job_" + uniqueString;
-        String url = "http://" + jobtracker.getJobTrackerMachine() + ":" + jobtracker.getInfoPort() + "/jobdetails.jsp?jobid=" + jobid;
-        this.jobtracker = jobtracker;
-        this.status = new JobStatus(jobid, 0.0f, 0.0f, JobStatus.PREP);
-        this.startTime = System.currentTimeMillis();
-        this.localFs = (LocalFileSystem)FileSystem.getLocal(default_conf);
-
-        JobConf default_job_conf = new JobConf(default_conf);
-        this.localJobFile = default_job_conf.getLocalPath(JobTracker.SUBDIR 
-                                                          +"/"+jobid + ".xml");
-        this.localJarFile = default_job_conf.getLocalPath(JobTracker.SUBDIR
-                                                          +"/"+ jobid + ".jar");
-        FileSystem fs = FileSystem.get(default_conf);
-        fs.copyToLocalFile(new Path(jobFile), localJobFile);
-        conf = new JobConf(localJobFile);
-        this.profile = new JobProfile(conf.getUser(), jobid, jobFile, url,
-                                      conf.getJobName());
-        String jarFile = conf.getJar();
-        if (jarFile != null) {
-          fs.copyToLocalFile(new Path(jarFile), localJarFile);
-          conf.setJar(localJarFile.toString());
-        }
-
-        this.numMapTasks = conf.getNumMapTasks();
-        this.numReduceTasks = conf.getNumReduceTasks();
-        this.taskCompletionEvents = new ArrayList(
-            numMapTasks + numReduceTasks + 10);
+  /**
+   * Create a JobInProgress with the given job file, plus a handle
+   * to the tracker.
+   */
+  public JobInProgress(String jobFile, JobTracker jobtracker, 
+                       Configuration default_conf) throws IOException {
+    uniqueString = jobtracker.createUniqueId();
+    String jobid = "job_" + uniqueString;
+    String url = "http://" + jobtracker.getJobTrackerMachine() + ":" + jobtracker.getInfoPort() + "/jobdetails.jsp?jobid=" + jobid;
+    this.jobtracker = jobtracker;
+    this.status = new JobStatus(jobid, 0.0f, 0.0f, JobStatus.PREP);
+    this.startTime = System.currentTimeMillis();
+    this.localFs = (LocalFileSystem)FileSystem.getLocal(default_conf);
+
+    JobConf default_job_conf = new JobConf(default_conf);
+    this.localJobFile = default_job_conf.getLocalPath(JobTracker.SUBDIR 
+                                                      +"/"+jobid + ".xml");
+    this.localJarFile = default_job_conf.getLocalPath(JobTracker.SUBDIR
+                                                      +"/"+ jobid + ".jar");
+    FileSystem fs = FileSystem.get(default_conf);
+    fs.copyToLocalFile(new Path(jobFile), localJobFile);
+    conf = new JobConf(localJobFile);
+    this.profile = new JobProfile(conf.getUser(), jobid, jobFile, url,
+                                  conf.getJobName());
+    String jarFile = conf.getJar();
+    if (jarFile != null) {
+      fs.copyToLocalFile(new Path(jarFile), localJarFile);
+      conf.setJar(localJarFile.toString());
+    }
+
+    this.numMapTasks = conf.getNumMapTasks();
+    this.numReduceTasks = conf.getNumReduceTasks();
+    this.taskCompletionEvents = new ArrayList(
+                                              numMapTasks + numReduceTasks + 10);
         
-        JobHistory.JobInfo.logSubmitted(jobid, conf.getJobName(), conf.getUser(), 
-            System.currentTimeMillis(), jobFile); 
+    JobHistory.JobInfo.logSubmitted(jobid, conf.getJobName(), conf.getUser(), 
+                                    System.currentTimeMillis(), jobFile); 
         
-        MetricsContext metricsContext = MetricsUtil.getContext("mapred");
-        this.jobMetrics = metricsContext.createRecord("job");
-        this.jobMetrics.setTag("user", conf.getUser());
-        this.jobMetrics.setTag("jobName", conf.getJobName());
-    }
-
-    /**
-     * Called periodically by JobTrackerMetrics to update the metrics for
-     * this job.
-     */
-    public void updateMetrics() {
-        Counters counters = getCounters();
-        for (String groupName : counters.getGroupNames()) {
-          Counters.Group group = counters.getGroup(groupName);
-          jobMetrics.setTag("group", group.getDisplayName());
+    MetricsContext metricsContext = MetricsUtil.getContext("mapred");
+    this.jobMetrics = metricsContext.createRecord("job");
+    this.jobMetrics.setTag("user", conf.getUser());
+    this.jobMetrics.setTag("jobName", conf.getJobName());
+  }
+
+  /**
+   * Called periodically by JobTrackerMetrics to update the metrics for
+   * this job.
+   */
+  public void updateMetrics() {
+    Counters counters = getCounters();
+    for (String groupName : counters.getGroupNames()) {
+      Counters.Group group = counters.getGroup(groupName);
+      jobMetrics.setTag("group", group.getDisplayName());
           
-          for (String counter : group.getCounterNames()) {
-            long value = group.getCounter(counter);
-            jobMetrics.setTag("counter", group.getDisplayName(counter));
-            jobMetrics.setMetric("value", (float) value);
-            jobMetrics.update();
-          }
-        }
+      for (String counter : group.getCounterNames()) {
+        long value = group.getCounter(counter);
+        jobMetrics.setTag("counter", group.getDisplayName(counter));
+        jobMetrics.setMetric("value", (float) value);
+        jobMetrics.update();
+      }
     }
+  }
     
-    /**
-     * Construct the splits, etc.  This is invoked from an async
-     * thread so that split-computation doesn't block anyone.
-     */
-    public synchronized void initTasks() throws IOException {
-        if (tasksInited) {
-            return;
-        }
-
-        //
-        // read input splits and create a map per a split
-        //
-        String jobFile = profile.getJobFile();
-
-        FileSystem fs = FileSystem.get(conf);
-        DataInputStream splitFile =
-          fs.open(new Path(conf.get("mapred.job.split.file")));
-        JobClient.RawSplit[] splits;
-        try {
-          splits = JobClient.readSplitFile(splitFile);
-        } finally {
-          splitFile.close();
-        }
-        numMapTasks = splits.length;
-        maps = new TaskInProgress[numMapTasks];
-        for(int i=0; i < numMapTasks; ++i) {
-          maps[i] = new TaskInProgress(uniqueString, jobFile, 
-                                       splits[i].getClassName(),
-                                       splits[i].getBytes(), 
-                                       jobtracker, conf, this, i);
-          for(String host: splits[i].getLocations()) {
-            List<TaskInProgress> hostMaps = hostToMaps.get(host);
-            if (hostMaps == null) {
-              hostMaps = new ArrayList();
-              hostToMaps.put(host, hostMaps);
-            }
-            hostMaps.add(maps[i]);              
-          }
-        }
+  /**
+   * Construct the splits, etc.  This is invoked from an async
+   * thread so that split-computation doesn't block anyone.
+   */
+  public synchronized void initTasks() throws IOException {
+    if (tasksInited) {
+      return;
+    }
+
+    //
+    // read input splits and create a map task per split
+    //
+    String jobFile = profile.getJobFile();
+
+    FileSystem fs = FileSystem.get(conf);
+    DataInputStream splitFile =
+      fs.open(new Path(conf.get("mapred.job.split.file")));
+    JobClient.RawSplit[] splits;
+    try {
+      splits = JobClient.readSplitFile(splitFile);
+    } finally {
+      splitFile.close();
+    }
+    numMapTasks = splits.length;
+    maps = new TaskInProgress[numMapTasks];
+    for(int i=0; i < numMapTasks; ++i) {
+      maps[i] = new TaskInProgress(uniqueString, jobFile, 
+                                   splits[i].getClassName(),
+                                   splits[i].getBytes(), 
+                                   jobtracker, conf, this, i);
+      for(String host: splits[i].getLocations()) {
+        List<TaskInProgress> hostMaps = hostToMaps.get(host);
+        if (hostMaps == null) {
+          hostMaps = new ArrayList();
+          hostToMaps.put(host, hostMaps);
+        }
+        hostMaps.add(maps[i]);              
+      }
+    }
         
-        // if no split is returned, job is considered completed and successful
-        if (numMapTasks == 0) {
-            // Finished time need to be setted here to prevent this job to be retired
-            // from the job tracker jobs at the next retire iteration.
-            this.finishTime = System.currentTimeMillis();
-            this.status = new JobStatus(status.getJobId(), 1.0f, 1.0f, JobStatus.SUCCEEDED);
-            tasksInited = true;
-            return;
-        }
+    // if no split is returned, job is considered completed and successful
+    if (numMapTasks == 0) {
+      // Finish time needs to be set here to prevent this job from being retired
+      // from the job tracker's job list at the next retire iteration.
+      this.finishTime = System.currentTimeMillis();
+      this.status = new JobStatus(status.getJobId(), 1.0f, 1.0f, JobStatus.SUCCEEDED);
+      tasksInited = true;
+      return;
+    }
         
-        //
-        // Create reduce tasks
-        //
-        this.reduces = new TaskInProgress[numReduceTasks];
-        for (int i = 0; i < numReduceTasks; i++) {
-            reduces[i] = new TaskInProgress(uniqueString, jobFile, 
-                                            numMapTasks, i, 
-                                            jobtracker, conf, this);
-        }
-
-        this.status = new JobStatus(status.getJobId(), 0.0f, 0.0f, JobStatus.RUNNING);
-        tasksInited = true;
+    //
+    // Create reduce tasks
+    //
+    this.reduces = new TaskInProgress[numReduceTasks];
+    for (int i = 0; i < numReduceTasks; i++) {
+      reduces[i] = new TaskInProgress(uniqueString, jobFile, 
+                                      numMapTasks, i, 
+                                      jobtracker, conf, this);
+    }
+
+    this.status = new JobStatus(status.getJobId(), 0.0f, 0.0f, JobStatus.RUNNING);
+    tasksInited = true;
         
-        JobHistory.JobInfo.logStarted(profile.getJobId(), System.currentTimeMillis(), numMapTasks, numReduceTasks);
-    }
-
-    /////////////////////////////////////////////////////
-    // Accessors for the JobInProgress
-    /////////////////////////////////////////////////////
-    public JobProfile getProfile() {
-        return profile;
-    }
-    public JobStatus getStatus() {
-        return status;
-    }
-    public long getStartTime() {
-        return startTime;
-    }
-    public long getFinishTime() {
-        return finishTime;
-    }
-    public int desiredMaps() {
-        return numMapTasks;
-    }
-    public int finishedMaps() {
-        return finishedMapTasks;
-    }
-    public int desiredReduces() {
-        return numReduceTasks;
-    }
-    public synchronized int runningMaps() {
-        return runningMapTasks;
-    }
-    public synchronized int runningReduces() {
-        return runningReduceTasks;
-    }
-    public int finishedReduces() {
-        return finishedReduceTasks;
-    }
+    JobHistory.JobInfo.logStarted(profile.getJobId(), System.currentTimeMillis(), numMapTasks, numReduceTasks);
+  }
+
+  /////////////////////////////////////////////////////
+  // Accessors for the JobInProgress
+  /////////////////////////////////////////////////////
+  public JobProfile getProfile() {
+    return profile;
+  }
+  public JobStatus getStatus() {
+    return status;
+  }
+  public long getStartTime() {
+    return startTime;
+  }
+  public long getFinishTime() {
+    return finishTime;
+  }
+  public int desiredMaps() {
+    return numMapTasks;
+  }
+  public int finishedMaps() {
+    return finishedMapTasks;
+  }
+  public int desiredReduces() {
+    return numReduceTasks;
+  }
+  public synchronized int runningMaps() {
+    return runningMapTasks;
+  }
+  public synchronized int runningReduces() {
+    return runningReduceTasks;
+  }
+  public int finishedReduces() {
+    return finishedReduceTasks;
+  }
  
-    /**
-     * Get the list of map tasks
-     * @return the raw array of maps for this job
-     */
-    TaskInProgress[] getMapTasks() {
-      return maps;
-    }
+  /**
+   * Get the list of map tasks
+   * @return the raw array of maps for this job
+   */
+  TaskInProgress[] getMapTasks() {
+    return maps;
+  }
     
-    /**
-     * Get the list of reduce tasks
-     * @return the raw array of reduce tasks for this job
-     */
-    TaskInProgress[] getReduceTasks() {
-      return reduces;
-    }
+  /**
+   * Get the list of reduce tasks
+   * @return the raw array of reduce tasks for this job
+   */
+  TaskInProgress[] getReduceTasks() {
+    return reduces;
+  }
     
-    /**
-     * Get the job configuration
-     * @return the job's configuration
-     */
-    JobConf getJobConf() {
-      return conf;
-    }
+  /**
+   * Get the job configuration
+   * @return the job's configuration
+   */
+  JobConf getJobConf() {
+    return conf;
+  }
     
-    /**
-     * Return a treeset of completed TaskInProgress objects
-     */
-    public Vector reportTasksInProgress(boolean shouldBeMap, boolean shouldBeComplete) {
-        Vector results = new Vector();
-        TaskInProgress tips[] = null;
-        if (shouldBeMap) {
-            tips = maps;
-        } else {
-            tips = reduces;
-        }
-        for (int i = 0; i < tips.length; i++) {
-            if (tips[i].isComplete() == shouldBeComplete) {
-                results.add(tips[i]);
-            }
-        }
-        return results;
-    }
-
-    ////////////////////////////////////////////////////
-    // Status update methods
-    ////////////////////////////////////////////////////
-    public synchronized void updateTaskStatus(TaskInProgress tip, 
-                                              TaskStatus status,
-                                              JobTrackerMetrics metrics) {
-
-        double oldProgress = tip.getProgress();   // save old progress
-        boolean wasRunning = tip.isRunning();
-        boolean wasComplete = tip.isComplete();
-        boolean change = tip.updateStatus(status);
-        if (change) {
-          TaskStatus.State state = status.getRunState();
-          TaskTrackerStatus ttStatus = 
-            this.jobtracker.getTaskTracker(status.getTaskTracker());
-          String httpTaskLogLocation = null; 
-          if( null != ttStatus ){
-            httpTaskLogLocation = "http://" + ttStatus.getHost() + ":" + 
-              ttStatus.getHttpPort() + "/tasklog.jsp?plaintext=true&taskid=" +
-              status.getTaskId() + "&all=true";
-          }
+  /**
+   * Return a vector of the TaskInProgress objects whose completion state matches shouldBeComplete
+   */
+  public Vector reportTasksInProgress(boolean shouldBeMap, boolean shouldBeComplete) {
+    Vector results = new Vector();
+    TaskInProgress tips[] = null;
+    if (shouldBeMap) {
+      tips = maps;
+    } else {
+      tips = reduces;
+    }
+    for (int i = 0; i < tips.length; i++) {
+      if (tips[i].isComplete() == shouldBeComplete) {
+        results.add(tips[i]);
+      }
+    }
+    return results;
+  }
+
+  ////////////////////////////////////////////////////
+  // Status update methods
+  ////////////////////////////////////////////////////
+  public synchronized void updateTaskStatus(TaskInProgress tip, 
+                                            TaskStatus status,
+                                            JobTrackerMetrics metrics) {
+
+    double oldProgress = tip.getProgress();   // save old progress
+    boolean wasRunning = tip.isRunning();
+    boolean wasComplete = tip.isComplete();
+    boolean change = tip.updateStatus(status);
+    if (change) {
+      TaskStatus.State state = status.getRunState();
+      TaskTrackerStatus ttStatus = 
+        this.jobtracker.getTaskTracker(status.getTaskTracker());
+      String httpTaskLogLocation = null; 
+      if( null != ttStatus ){
+        httpTaskLogLocation = "http://" + ttStatus.getHost() + ":" + 
+          ttStatus.getHttpPort() + "/tasklog.jsp?plaintext=true&taskid=" +
+          status.getTaskId() + "&all=true";
+      }
 
-          TaskCompletionEvent taskEvent = null;
-          if (state == TaskStatus.State.SUCCEEDED) {
-            taskEvent = new TaskCompletionEvent(
-                          taskCompletionEventTracker, 
-                          status.getTaskId(),
-                          tip.idWithinJob(),
-                          status.getIsMap(),
-                          TaskCompletionEvent.Status.SUCCEEDED,
-                          httpTaskLogLocation 
-                          );
-            tip.setSuccessEventNumber(taskCompletionEventTracker);
-            completedTask(tip, status, metrics);
-          } else if (state == TaskStatus.State.FAILED ||
-                     state == TaskStatus.State.KILLED) {
-            taskEvent = new TaskCompletionEvent(
-                          taskCompletionEventTracker, 
-                          status.getTaskId(),
-                          tip.idWithinJob(),
-                          status.getIsMap(),
-                          TaskCompletionEvent.Status.FAILED, 
-                          httpTaskLogLocation
-                          );
-            // Get the event number for the (possibly) previously successful
-            // task. If there exists one, then set that status to OBSOLETE 
-            int eventNumber;
-            if ((eventNumber = tip.getSuccessEventNumber()) != -1) {
-              TaskCompletionEvent t = 
-                this.taskCompletionEvents.get(eventNumber);
-              if (t.getTaskId().equals(status.getTaskId()))
-                t.setTaskStatus(TaskCompletionEvent.Status.OBSOLETE);
-            }
-            // Tell the job to fail the relevant task
-            failedTask(tip, status.getTaskId(), status, status.getTaskTracker(),
-                       wasRunning, wasComplete);
-          }          
-
-          // Add the 'complete' task i.e. successful/failed
-          if (taskEvent != null) {
-            this.taskCompletionEvents.add(taskEvent);
-            taskCompletionEventTracker++;
-          }
-        }
+      TaskCompletionEvent taskEvent = null;
+      if (state == TaskStatus.State.SUCCEEDED) {
+        taskEvent = new TaskCompletionEvent(
+                                            taskCompletionEventTracker, 
+                                            status.getTaskId(),
+                                            tip.idWithinJob(),
+                                            status.getIsMap(),
+                                            TaskCompletionEvent.Status.SUCCEEDED,
+                                            httpTaskLogLocation 
+                                            );
+        tip.setSuccessEventNumber(taskCompletionEventTracker);
+        completedTask(tip, status, metrics);
+      } else if (state == TaskStatus.State.FAILED ||
+                 state == TaskStatus.State.KILLED) {
+        taskEvent = new TaskCompletionEvent(
+                                            taskCompletionEventTracker, 
+                                            status.getTaskId(),
+                                            tip.idWithinJob(),
+                                            status.getIsMap(),
+                                            TaskCompletionEvent.Status.FAILED, 
+                                            httpTaskLogLocation
+                                            );
+        // Get the event number for the (possibly) previously successful
+        // task. If there exists one, then set that status to OBSOLETE 
+        int eventNumber;
+        if ((eventNumber = tip.getSuccessEventNumber()) != -1) {
+          TaskCompletionEvent t = 
+            this.taskCompletionEvents.get(eventNumber);
+          if (t.getTaskId().equals(status.getTaskId()))
+            t.setTaskStatus(TaskCompletionEvent.Status.OBSOLETE);
+        }
+        // Tell the job to fail the relevant task
+        failedTask(tip, status.getTaskId(), status, status.getTaskTracker(),
+                   wasRunning, wasComplete);
+      }          
+
+      // Add the 'complete' task i.e. successful/failed
+      if (taskEvent != null) {
+        this.taskCompletionEvents.add(taskEvent);
+        taskCompletionEventTracker++;
+      }
+    }
         
-        //
-        // Update JobInProgress status
-        //
-        LOG.debug("Taking progress for " + tip.getTIPId() + " from " + 
-                  oldProgress + " to " + tip.getProgress());
-        double progressDelta = tip.getProgress() - oldProgress;
-        if (tip.isMapTask()) {
-          if (maps.length == 0) {
-            this.status.setMapProgress(1.0f);
-          } else {
-            this.status.setMapProgress((float) (this.status.mapProgress() +
-                                                progressDelta / maps.length));
-          }
-        } else {
-          if (reduces.length == 0) {
-            this.status.setReduceProgress(1.0f);
-          } else {
-            this.status.setReduceProgress
-                 ((float) (this.status.reduceProgress() +
-                           (progressDelta / reduces.length)));
-          }
-        }
+    //
+    // Update JobInProgress status
+    //
+    LOG.debug("Taking progress for " + tip.getTIPId() + " from " + 
+              oldProgress + " to " + tip.getProgress());
+    double progressDelta = tip.getProgress() - oldProgress;
+    if (tip.isMapTask()) {
+      if (maps.length == 0) {
+        this.status.setMapProgress(1.0f);
+      } else {
+        this.status.setMapProgress((float) (this.status.mapProgress() +
+                                            progressDelta / maps.length));
+      }
+    } else {
+      if (reduces.length == 0) {
+        this.status.setReduceProgress(1.0f);
+      } else {
+        this.status.setReduceProgress
+          ((float) (this.status.reduceProgress() +
+                    (progressDelta / reduces.length)));
+      }
     }
+  }
     
-    /**
-     *  Returns map phase counters by summing over all map tasks in progress.
-     */
-    public synchronized Counters getMapCounters() {
-      return sumTaskCounters(maps);
-    }
+  /**
+   *  Returns map phase counters by summing over all map tasks in progress.
+   */
+  public synchronized Counters getMapCounters() {
+    return sumTaskCounters(maps);
+  }
     
-    /**
-     *  Returns map phase counters by summing over all map tasks in progress.
-     */
-    public synchronized Counters getReduceCounters() {
-      return sumTaskCounters(reduces);
-    }
+  /**
+   *  Returns reduce phase counters by summing over all reduce tasks in progress.
+   */
+  public synchronized Counters getReduceCounters() {
+    return sumTaskCounters(reduces);
+  }
     
-    /**
-     *  Returns the total job counters, by adding together the map and the
-     *  reduce counters.
-     */
-    public Counters getCounters() {
-      return Counters.sum(getMapCounters(), getReduceCounters());
-    }
+  /**
+   *  Returns the total job counters, by adding together the map and the
+   *  reduce counters.
+   */
+  public Counters getCounters() {
+    return Counters.sum(getMapCounters(), getReduceCounters());
+  }
     
-    /**
-     * Returns a Counters instance representing the sum of all the counters in
-     * the array of tasks in progress.
-     */
-    private Counters sumTaskCounters(TaskInProgress[] tips) {
-      Counters counters = new Counters();
-      for (TaskInProgress tip : tips) {
-        counters.incrAllCounters(tip.getCounters());
-      }
-      return counters;
-    }
-
-    /////////////////////////////////////////////////////
-    // Create/manage tasks
-    /////////////////////////////////////////////////////
-    /**
-     * Return a MapTask, if appropriate, to run on the given tasktracker
-     */
-    public Task obtainNewMapTask(TaskTrackerStatus tts, int clusterSize
-                                 ) throws IOException {
-      if (! tasksInited) {
-        LOG.info("Cannot create task split for " + profile.getJobId());
-        return null;
-      }
-      ArrayList mapCache = (ArrayList)hostToMaps.get(tts.getHost());
-      int target = findNewTask(tts, clusterSize, status.mapProgress(), 
-                                  maps, mapCache);
-      if (target == -1) {
-        return null;
-      }
-      boolean wasRunning = maps[target].isRunning();
-      Task result = maps[target].getTaskToRun(tts.getTrackerName());
-      if (!wasRunning) {
-        runningMapTasks += 1;
-        JobHistory.Task.logStarted(profile.getJobId(), 
-            maps[target].getTIPId(), Values.MAP.name(),
-            System.currentTimeMillis());
-      }
-      return result;
-    }    
-
-    /**
-     * Return a ReduceTask, if appropriate, to run on the given tasktracker.
-     * We don't have cache-sensitivity for reduce tasks, as they
-     *  work on temporary MapRed files.  
-     */
-    public Task obtainNewReduceTask(TaskTrackerStatus tts,
-                                    int clusterSize) throws IOException {
-        if (! tasksInited) {
-            LOG.info("Cannot create task split for " + profile.getJobId());
-            return null;
-        }
+  /**
+   * Returns a Counters instance representing the sum of all the counters in
+   * the array of tasks in progress.
+   */
+  private Counters sumTaskCounters(TaskInProgress[] tips) {
+    Counters counters = new Counters();
+    for (TaskInProgress tip : tips) {
+      counters.incrAllCounters(tip.getCounters());
+    }
+    return counters;
+  }
+
+  /////////////////////////////////////////////////////
+  // Create/manage tasks
+  /////////////////////////////////////////////////////
+  /**
+   * Return a MapTask, if appropriate, to run on the given tasktracker
+   */
+  public Task obtainNewMapTask(TaskTrackerStatus tts, int clusterSize
+                               ) throws IOException {
+    if (! tasksInited) {
+      LOG.info("Cannot create task split for " + profile.getJobId());
+      return null;
+    }
+    ArrayList mapCache = (ArrayList)hostToMaps.get(tts.getHost());
+    int target = findNewTask(tts, clusterSize, status.mapProgress(), 
+                             maps, mapCache);
+    if (target == -1) {
+      return null;
+    }
+    boolean wasRunning = maps[target].isRunning();
+    Task result = maps[target].getTaskToRun(tts.getTrackerName());
+    if (!wasRunning) {
+      runningMapTasks += 1;
+      JobHistory.Task.logStarted(profile.getJobId(), 
+                                 maps[target].getTIPId(), Values.MAP.name(),
+                                 System.currentTimeMillis());
+    }
+    return result;
+  }    
+
+  /**
+   * Return a ReduceTask, if appropriate, to run on the given tasktracker.
+   * We don't have cache-sensitivity for reduce tasks, as they
+   *  work on temporary MapRed files.  
+   */
+  public Task obtainNewReduceTask(TaskTrackerStatus tts,
+                                  int clusterSize) throws IOException {
+    if (! tasksInited) {
+      LOG.info("Cannot create task split for " + profile.getJobId());
+      return null;
+    }
 
-        int target = findNewTask(tts, clusterSize, status.reduceProgress() , 
-                                    reduces, null);
-        if (target == -1) {
-          return null;
-        }
-        boolean wasRunning = reduces[target].isRunning();
-        Task result = reduces[target].getTaskToRun(tts.getTrackerName());
-        if (!wasRunning) {
-          runningReduceTasks += 1;
-          JobHistory.Task.logStarted(profile.getJobId(), 
-              reduces[target].getTIPId(), Values.REDUCE.name(),
-              System.currentTimeMillis());
-        }
-        return result;
+    int target = findNewTask(tts, clusterSize, status.reduceProgress() , 
+                             reduces, null);
+    if (target == -1) {
+      return null;
     }
-    
-    private String convertTrackerNameToHostName(String trackerName) {
-      // Ugly!
-      // Convert the trackerName to it's host name
-      int indexOfColon = trackerName.indexOf(":");
-      String trackerHostName = (indexOfColon == -1) ? 
-                                trackerName : 
-                                trackerName.substring(0, indexOfColon);
-      return trackerHostName;
+    boolean wasRunning = reduces[target].isRunning();
+    Task result = reduces[target].getTaskToRun(tts.getTrackerName());
+    if (!wasRunning) {
+      runningReduceTasks += 1;
+      JobHistory.Task.logStarted(profile.getJobId(), 
+                                 reduces[target].getTIPId(), Values.REDUCE.name(),
+                                 System.currentTimeMillis());
     }
+    return result;
+  }
     
-    private void addTrackerTaskFailure(String trackerName) {
-      String trackerHostName = convertTrackerNameToHostName(trackerName);
+  private String convertTrackerNameToHostName(String trackerName) {
+    // Ugly!
+    // Convert the trackerName to its host name
+    int indexOfColon = trackerName.indexOf(":");
+    String trackerHostName = (indexOfColon == -1) ? 
+      trackerName : 
+      trackerName.substring(0, indexOfColon);
+    return trackerHostName;
+  }
+    
+  private void addTrackerTaskFailure(String trackerName) {
+    String trackerHostName = convertTrackerNameToHostName(trackerName);
       
-      Integer trackerFailures = trackerToFailuresMap.get(trackerHostName);
-      if (trackerFailures == null) {
-        trackerFailures = new Integer(0);
-      }
-      trackerToFailuresMap.put(trackerHostName, ++trackerFailures);
+    Integer trackerFailures = trackerToFailuresMap.get(trackerHostName);
+    if (trackerFailures == null) {
+      trackerFailures = new Integer(0);
+    }
+    trackerToFailuresMap.put(trackerHostName, ++trackerFailures);
       
-      // Check if this tasktracker has turned 'flaky'
-      if (trackerFailures.intValue() == conf.getMaxTaskFailuresPerTracker()) {
-        ++flakyTaskTrackers;
-        LOG.info("TaskTracker at '" + trackerHostName + "' turned 'flaky'");
-      }
+    // Check if this tasktracker has turned 'flaky'
+    if (trackerFailures.intValue() == conf.getMaxTaskFailuresPerTracker()) {
+      ++flakyTaskTrackers;
+      LOG.info("TaskTracker at '" + trackerHostName + "' turned 'flaky'");
     }
+  }
     
-    private int getTrackerTaskFailures(String trackerName) {
-      String trackerHostName = convertTrackerNameToHostName(trackerName);
-      Integer failedTasks = trackerToFailuresMap.get(trackerHostName);
-      return (failedTasks != null) ? failedTasks.intValue() : 0; 
-    }
+  private int getTrackerTaskFailures(String trackerName) {
+    String trackerHostName = convertTrackerNameToHostName(trackerName);
+    Integer failedTasks = trackerToFailuresMap.get(trackerHostName);
+    return (failedTasks != null) ? failedTasks.intValue() : 0; 
+  }
     
-    /**
-     * Get the no. of 'flaky' tasktrackers for a given job.
-     * 
-     * @return the no. of 'flaky' tasktrackers for a given job.
-     */
-    int getNoOfBlackListedTrackers() {
-      return flakyTaskTrackers;
-    }
+  /**
+   * Get the no. of 'flaky' tasktrackers for a given job.
+   * 
+   * @return the no. of 'flaky' tasktrackers for a given job.
+   */
+  int getNoOfBlackListedTrackers() {
+    return flakyTaskTrackers;
+  }
     
-    /**
-     * Get the information on tasktrackers and no. of errors which occurred
-     * on them for a given job. 
-     * 
-     * @return the map of tasktrackers and no. of errors which occurred
-     *         on them for a given job. 
-     */
-    synchronized Map<String, Integer> getTaskTrackerErrors() {
-      // Clone the 'trackerToFailuresMap' and return the copy
-      Map<String, Integer> trackerErrors = 
-        new TreeMap<String, Integer>(trackerToFailuresMap);
-      return trackerErrors;
-    }
+  /**
+   * Get the information on tasktrackers and no. of errors which occurred
+   * on them for a given job. 
+   * 
+   * @return the map of tasktrackers and no. of errors which occurred
+   *         on them for a given job. 
+   */
+  synchronized Map<String, Integer> getTaskTrackerErrors() {
+    // Clone the 'trackerToFailuresMap' and return the copy
+    Map<String, Integer> trackerErrors = 
+      new TreeMap<String, Integer>(trackerToFailuresMap);
+    return trackerErrors;
+  }
     
-    /**
-     * Find a new task to run.
-     * @param tts The task tracker that is asking for a task
-     * @param clusterSize The number of task trackers in the cluster
-     * @param avgProgress The average progress of this kind of task in this job
-     * @param tasks The list of potential tasks to try
-     * @param firstTaskToTry The first index in tasks to check
-     * @param cachedTasks A list of tasks that would like to run on this node
-     * @return the index in tasks of the selected task (or -1 for no task)
-     */
-    private int findNewTask(TaskTrackerStatus tts, 
-                            int clusterSize,
-                            double avgProgress,
-                            TaskInProgress[] tasks,
-                            List cachedTasks) {
-        String taskTracker = tts.getTrackerName();
-
-        //
-        // Check if too many tasks of this job have failed on this
-        // tasktracker prior to assigning it a new one.
-        //
-        int taskTrackerFailedTasks = getTrackerTaskFailures(taskTracker);
-        if (taskTrackerFailedTasks >= conf.getMaxTaskFailuresPerTracker()) {
-          String flakyTracker = convertTrackerNameToHostName(taskTracker); 
-          if (flakyTaskTrackers < clusterSize) {
-            LOG.debug("Ignoring the black-listed tasktracker: '" + flakyTracker 
-                    + "' for assigning a new task");
-            return -1;
-          } else {
-            LOG.warn("Trying to assign a new task for black-listed tracker " + 
-                    flakyTracker + " since all task-trackers in the cluster are " +
-                    "'flaky' !");
-          }
-        }
+  /**
+   * Find a new task to run.
+   * @param tts The task tracker that is asking for a task
+   * @param clusterSize The number of task trackers in the cluster
+   * @param avgProgress The average progress of this kind of task in this job
+   * @param tasks The list of potential tasks to try
+   * @param firstTaskToTry The first index in tasks to check
+   * @param cachedTasks A list of tasks that would like to run on this node
+   * @return the index in tasks of the selected task (or -1 for no task)
+   */
+  private int findNewTask(TaskTrackerStatus tts, 
+                          int clusterSize,
+                          double avgProgress,
+                          TaskInProgress[] tasks,
+                          List cachedTasks) {
+    String taskTracker = tts.getTrackerName();
+
+    //
+    // Check if too many tasks of this job have failed on this
+    // tasktracker prior to assigning it a new one.
+    //
+    int taskTrackerFailedTasks = getTrackerTaskFailures(taskTracker);
+    if (taskTrackerFailedTasks >= conf.getMaxTaskFailuresPerTracker()) {
+      String flakyTracker = convertTrackerNameToHostName(taskTracker); 
+      if (flakyTaskTrackers < clusterSize) {
+        LOG.debug("Ignoring the black-listed tasktracker: '" + flakyTracker 
+                  + "' for assigning a new task");
+        return -1;
+      } else {
+        LOG.warn("Trying to assign a new task for black-listed tracker " + 
+                 flakyTracker + " since all task-trackers in the cluster are " +
+                 "'flaky' !");
+      }
+    }
         
-        //
-        // See if there is a split over a block that is stored on
-        // the TaskTracker checking in.  That means the block
-        // doesn't have to be transmitted from another node.
-        //
-        if (cachedTasks != null) {
-          Iterator i = cachedTasks.iterator();
-          while (i.hasNext()) {
-            TaskInProgress tip = (TaskInProgress)i.next();
-            i.remove();
-            if (tip.isRunnable() && 
-                !tip.isRunning() &&
-                !tip.hasFailedOnMachine(taskTracker)) {
-              LOG.info("Choosing cached task " + tip.getTIPId());
-              int cacheTarget = tip.getIdWithinJob();
-              return cacheTarget;
-            }
-          }
+    //
+    // See if there is a split over a block that is stored on
+    // the TaskTracker checking in.  That means the block
+    // doesn't have to be transmitted from another node.
+    //
+    if (cachedTasks != null) {
+      Iterator i = cachedTasks.iterator();
+      while (i.hasNext()) {
+        TaskInProgress tip = (TaskInProgress)i.next();
+        i.remove();
+        if (tip.isRunnable() && 
+            !tip.isRunning() &&
+            !tip.hasFailedOnMachine(taskTracker)) {
+          LOG.info("Choosing cached task " + tip.getTIPId());
+          int cacheTarget = tip.getIdWithinJob();
+          return cacheTarget;
         }
+      }
+    }
 
 
-        //
-        // If there's no cached target, see if there's
-        // a std. task to run.
-        //
-        int failedTarget = -1;
-        int specTarget = -1;
-        for (int i = 0; i < tasks.length; i++) {
-          TaskInProgress task = tasks[i];
-          if (task.isRunnable()) {
-            // if it failed here and we haven't tried every machine, we
-            // don't schedule it here.
-            boolean hasFailed = task.hasFailedOnMachine(taskTracker);
-            if (hasFailed && (task.getNumberOfFailedMachines() < clusterSize)) {
-              continue;
-            }
-            boolean isRunning = task.isRunning();
-            if (hasFailed) {
-              // failed tasks that aren't running can be scheduled as a last
-              // resort
-              if (!isRunning && failedTarget == -1) {
-                failedTarget = i;
-              }
-            } else {
-              if (!isRunning) {
-                LOG.info("Choosing normal task " + tasks[i].getTIPId());
-                return i;
-              } else if (specTarget == -1 &&
-                         task.hasSpeculativeTask(avgProgress) && 
-                         ! task.hasRunOnMachine(taskTracker)) {
-                specTarget = i;
-              }
-            }
+    //
+    // If there's no cached target, see if there's
+    // a std. task to run.
+    //
+    int failedTarget = -1;
+    int specTarget = -1;
+    for (int i = 0; i < tasks.length; i++) {
+      TaskInProgress task = tasks[i];
+      if (task.isRunnable()) {
+        // if it failed here and we haven't tried every machine, we
+        // don't schedule it here.
+        boolean hasFailed = task.hasFailedOnMachine(taskTracker);
+        if (hasFailed && (task.getNumberOfFailedMachines() < clusterSize)) {
+          continue;
+        }
+        boolean isRunning = task.isRunning();
+        if (hasFailed) {
+          // failed tasks that aren't running can be scheduled as a last
+          // resort
+          if (!isRunning && failedTarget == -1) {
+            failedTarget = i;
+          }
+        } else {
+          if (!isRunning) {
+            LOG.info("Choosing normal task " + tasks[i].getTIPId());
+            return i;
+          } else if (specTarget == -1 &&
+                     task.hasSpeculativeTask(avgProgress) && 
+                     ! task.hasRunOnMachine(taskTracker)) {
+            specTarget = i;
           }
         }
-        if (specTarget != -1) {
-          LOG.info("Choosing speculative task " + 
-                    tasks[specTarget].getTIPId());
-        } else if (failedTarget != -1) {
-          LOG.info("Choosing failed task " + 
-                    tasks[failedTarget].getTIPId());          
-        }
-        return specTarget != -1 ? specTarget : failedTarget;
+      }
     }
-
-    /**
-     * A taskid assigned to this JobInProgress has reported in successfully.
-     */
-    public synchronized void completedTask(TaskInProgress tip, 
-                                           TaskStatus status,
-                                           JobTrackerMetrics metrics) {
-        String taskid = status.getTaskId();
+    if (specTarget != -1) {
+      LOG.info("Choosing speculative task " + 
+               tasks[specTarget].getTIPId());
+    } else if (failedTarget != -1) {
+      LOG.info("Choosing failed task " + 
+               tasks[failedTarget].getTIPId());          
+    }
+    return specTarget != -1 ? specTarget : failedTarget;
+  }
+
+  /**
+   * A taskid assigned to this JobInProgress has reported in successfully.
+   */
+  public synchronized void completedTask(TaskInProgress tip, 
+                                         TaskStatus status,
+                                         JobTrackerMetrics metrics) {
+    String taskid = status.getTaskId();
         
-        // Sanity check: is the TIP already complete?
-        if (tip.isComplete()) {
-          LOG.info("Already complete TIP " + tip.getTIPId() + 
+    // Sanity check: is the TIP already complete?
+    if (tip.isComplete()) {
+      LOG.info("Already complete TIP " + tip.getTIPId() + 
                " has completed task " + taskid);
           
-          // Just mark this 'task' as complete
-          tip.completedTask(taskid);
+      // Just mark this 'task' as complete
+      tip.completedTask(taskid);
           
-          // Let the JobTracker cleanup this taskid if the job isn't running
-          if (this.status.getRunState() != JobStatus.RUNNING) {
-            jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid);
-          }
-          return;
-        } 
-
-        LOG.info("Task '" + taskid + "' has completed " + tip.getTIPId() + 
-          " successfully.");          
-
-        // Update jobhistory 
-        String taskTrackerName = status.getTaskTracker();
-        if(status.getIsMap()){
-          JobHistory.MapAttempt.logStarted(profile.getJobId(), 
-               tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
-               taskTrackerName); 
-          JobHistory.MapAttempt.logFinished(profile.getJobId(), 
-               tip.getTIPId(), status.getTaskId(), status.getFinishTime(), 
-               taskTrackerName); 
-          JobHistory.Task.logFinished(profile.getJobId(), tip.getTIPId(), 
-               Values.MAP.name(), status.getFinishTime()); 
-        }else{
-          JobHistory.ReduceAttempt.logStarted(profile.getJobId(), 
-               tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
-               taskTrackerName); 
-          JobHistory.ReduceAttempt.logFinished(profile.getJobId(), 
-               tip.getTIPId(), status.getTaskId(), status.getShuffleFinishTime(),
-               status.getSortFinishTime(), status.getFinishTime(), 
-               taskTrackerName); 
-          JobHistory.Task.logFinished(profile.getJobId(), tip.getTIPId(), 
-               Values.REDUCE.name(), status.getFinishTime()); 
-        }
+      // Let the JobTracker cleanup this taskid if the job isn't running
+      if (this.status.getRunState() != JobStatus.RUNNING) {
+        jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid);
+      }
+      return;
+    } 
+
+    LOG.info("Task '" + taskid + "' has completed " + tip.getTIPId() + 
+             " successfully.");          
+
+    // Update jobhistory 
+    String taskTrackerName = status.getTaskTracker();
+    if(status.getIsMap()){
+      JobHistory.MapAttempt.logStarted(profile.getJobId(), 
+                                       tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
+                                       taskTrackerName); 
+      JobHistory.MapAttempt.logFinished(profile.getJobId(), 
+                                        tip.getTIPId(), status.getTaskId(), status.getFinishTime(), 
+                                        taskTrackerName); 
+      JobHistory.Task.logFinished(profile.getJobId(), tip.getTIPId(), 
+                                  Values.MAP.name(), status.getFinishTime()); 
+    }else{
+      JobHistory.ReduceAttempt.logStarted(profile.getJobId(), 
+                                          tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
+                                          taskTrackerName); 
+      JobHistory.ReduceAttempt.logFinished(profile.getJobId(), 
+                                           tip.getTIPId(), status.getTaskId(), status.getShuffleFinishTime(),
+                                           status.getSortFinishTime(), status.getFinishTime(), 
+                                           taskTrackerName); 
+      JobHistory.Task.logFinished(profile.getJobId(), tip.getTIPId(), 
+                                  Values.REDUCE.name(), status.getFinishTime()); 
+    }
         
-        // Mark the TIP as complete
-        tip.completed(taskid);
+    // Mark the TIP as complete
+    tip.completed(taskid);
         
-        // Update the running/finished map/reduce counts
-        if (tip.isMapTask()){
-          runningMapTasks -= 1;
-          finishedMapTasks += 1;
-          metrics.completeMap();
-        } else{
-          runningReduceTasks -= 1;
-          finishedReduceTasks += 1;
-          metrics.completeReduce();
-        }
+    // Update the running/finished map/reduce counts
+    if (tip.isMapTask()){
+      runningMapTasks -= 1;
+      finishedMapTasks += 1;
+      metrics.completeMap();
+    } else{
+      runningReduceTasks -= 1;
+      finishedReduceTasks += 1;
+      metrics.completeReduce();
+    }
         
-        //
-        // Figure out whether the Job is done
-        //
-        boolean allDone = true;
-        for (int i = 0; i < maps.length; i++) {
-            if (! maps[i].isComplete()) {
-                allDone = false;
-                break;
-            }
-        }
-        if (allDone) {
-            if (tip.isMapTask()) {
-              this.status.setMapProgress(1.0f);              
-            }
-            for (int i = 0; i < reduces.length; i++) {
-                if (! reduces[i].isComplete()) {
-                    allDone = false;
-                    break;
-                }
-            }
-        }
-
-        //
-        // If all tasks are complete, then the job is done!
-        //
-        if (this.status.getRunState() == JobStatus.RUNNING && allDone) {
-            this.status.setRunState(JobStatus.SUCCEEDED);
-            this.status.setReduceProgress(1.0f);
-            this.finishTime = System.currentTimeMillis();
-            garbageCollect();
-            LOG.info("Job " + this.status.getJobId() + 
-                     " has completed successfully.");
-            JobHistory.JobInfo.logFinished(this.status.getJobId(), finishTime, 
-                this.finishedMapTasks, this.finishedReduceTasks, failedMapTasks, failedReduceTasks);
-            metrics.completeJob();
-        } else if (this.status.getRunState() != JobStatus.RUNNING) {
-            // The job has been killed/failed, 
-            // JobTracker should cleanup this task
-            jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid);
-        }
+    //
+    // Figure out whether the Job is done
+    //
+    boolean allDone = true;
+    for (int i = 0; i < maps.length; i++) {
+      if (! maps[i].isComplete()) {
+        allDone = false;
+        break;
+      }
     }
-
-    /**
-     * Kill the job and all its component tasks.
-     */
-    public synchronized void kill() {
-        if (status.getRunState() != JobStatus.FAILED) {
-            LOG.info("Killing job '" + this.status.getJobId() + "'");
-            this.status = new JobStatus(status.getJobId(), 1.0f, 1.0f, JobStatus.FAILED);
-            this.finishTime = System.currentTimeMillis();
-            this.runningMapTasks = 0;
-            this.runningReduceTasks = 0;
-            //
-            // kill all TIPs.
-            //
-            for (int i = 0; i < maps.length; i++) {
-                maps[i].kill();
-            }
-            for (int i = 0; i < reduces.length; i++) {
-                reduces[i].kill();
-            }
-            JobHistory.JobInfo.logFailed(this.status.getJobId(), finishTime, 
-                this.finishedMapTasks, this.finishedReduceTasks);
-            garbageCollect();
+    if (allDone) {
+      if (tip.isMapTask()) {
+        this.status.setMapProgress(1.0f);              
+      }
+      for (int i = 0; i < reduces.length; i++) {
+        if (! reduces[i].isComplete()) {
+          allDone = false;
+          break;
         }
+      }
     }
 
-    /**
-     * A task assigned to this JobInProgress has reported in as failed.
-     * Most of the time, we'll just reschedule execution.  However, after
-     * many repeated failures we may instead decide to allow the entire 
-     * job to fail.
-     *
-     * Even if a task has reported as completed in the past, it might later
-     * be reported as failed.  That's because the TaskTracker that hosts a map
-     * task might die before the entire job can complete.  If that happens,
-     * we need to schedule reexecution so that downstream reduce tasks can 
-     * obtain the map task's output.
-     */
-    private void failedTask(TaskInProgress tip, String taskid, 
-                            TaskStatus status, String trackerName,
-                            boolean wasRunning, boolean wasComplete) {
-        // Mark the taskid as a 'failure'
-        tip.failedSubTask(taskid, trackerName);
+    //
+    // If all tasks are complete, then the job is done!
+    //
+    if (this.status.getRunState() == JobStatus.RUNNING && allDone) {
+      this.status.setRunState(JobStatus.SUCCEEDED);
+      this.status.setReduceProgress(1.0f);
+      this.finishTime = System.currentTimeMillis();
+      garbageCollect();
+      LOG.info("Job " + this.status.getJobId() + 
+               " has completed successfully.");
+      JobHistory.JobInfo.logFinished(this.status.getJobId(), finishTime, 
+                                     this.finishedMapTasks, this.finishedReduceTasks, failedMapTasks, failedReduceTasks);
+      metrics.completeJob();
+    } else if (this.status.getRunState() != JobStatus.RUNNING) {
+      // The job has been killed/failed, 
+      // JobTracker should cleanup this task
+      jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid);
+    }
+  }
+
+  /**
+   * Kill the job and all its component tasks.
+   */
+  public synchronized void kill() {
+    if (status.getRunState() != JobStatus.FAILED) {
+      LOG.info("Killing job '" + this.status.getJobId() + "'");
+      this.status = new JobStatus(status.getJobId(), 1.0f, 1.0f, JobStatus.FAILED);
+      this.finishTime = System.currentTimeMillis();
+      this.runningMapTasks = 0;
+      this.runningReduceTasks = 0;
+      //
+      // kill all TIPs.
+      //
+      for (int i = 0; i < maps.length; i++) {
+        maps[i].kill();
+      }
+      for (int i = 0; i < reduces.length; i++) {
+        reduces[i].kill();
+      }
+      JobHistory.JobInfo.logFailed(this.status.getJobId(), finishTime, 
+                                   this.finishedMapTasks, this.finishedReduceTasks);
+      garbageCollect();
+    }
+  }
+
+  /**
+   * A task assigned to this JobInProgress has reported in as failed.
+   * Most of the time, we'll just reschedule execution.  However, after
+   * many repeated failures we may instead decide to allow the entire 
+   * job to fail.
+   *
+   * Even if a task has reported as completed in the past, it might later
+   * be reported as failed.  That's because the TaskTracker that hosts a map
+   * task might die before the entire job can complete.  If that happens,
+   * we need to schedule reexecution so that downstream reduce tasks can 
+   * obtain the map task's output.
+   */
+  private void failedTask(TaskInProgress tip, String taskid, 
+                          TaskStatus status, String trackerName,
+                          boolean wasRunning, boolean wasComplete) {
+    // Mark the taskid as a 'failure'
+    tip.failedSubTask(taskid, trackerName);
         
-        boolean isRunning = tip.isRunning();
-        boolean isComplete = tip.isComplete();
+    boolean isRunning = tip.isRunning();
+    boolean isComplete = tip.isComplete();
         
-        //update running  count on task failure.
-        if (wasRunning && !isRunning) {
-          if (tip.isMapTask()){
-            runningMapTasks -= 1;
-          } else {
-            runningReduceTasks -= 1;
-          }
-        }
+    //update running  count on task failure.
+    if (wasRunning && !isRunning) {
+      if (tip.isMapTask()){
+        runningMapTasks -= 1;
+      } else {
+        runningReduceTasks -= 1;
+      }
+    }
         
-        // the case when the map was complete but the task tracker went down.
-        if (wasComplete && !isComplete) {
-          if (tip.isMapTask()){
-            finishedMapTasks -= 1;
-          }
-        }
+    // the case when the map was complete but the task tracker went down.
+    if (wasComplete && !isComplete) {
+      if (tip.isMapTask()){
+        finishedMapTasks -= 1;
+      }
+    }
         
-        // update job history
-        String taskTrackerName = status.getTaskTracker();
-        if (status.getIsMap()) {
-          JobHistory.MapAttempt.logStarted(profile.getJobId(), 
-              tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
-              taskTrackerName); 
-          JobHistory.MapAttempt.logFailed(profile.getJobId(), 
-              tip.getTIPId(), status.getTaskId(), System.currentTimeMillis(),
-              taskTrackerName, status.getDiagnosticInfo()); 
-        } else {
-          JobHistory.ReduceAttempt.logStarted(profile.getJobId(), 
-              tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
-              taskTrackerName); 
-          JobHistory.ReduceAttempt.logFailed(profile.getJobId(), 
-              tip.getTIPId(), status.getTaskId(), System.currentTimeMillis(),
-              taskTrackerName, status.getDiagnosticInfo()); 
-        }
+    // update job history
+    String taskTrackerName = status.getTaskTracker();
+    if (status.getIsMap()) {
+      JobHistory.MapAttempt.logStarted(profile.getJobId(), 
+                                       tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
+                                       taskTrackerName); 
+      JobHistory.MapAttempt.logFailed(profile.getJobId(), 
+                                      tip.getTIPId(), status.getTaskId(), System.currentTimeMillis(),
+                                      taskTrackerName, status.getDiagnosticInfo()); 
+    } else {
+      JobHistory.ReduceAttempt.logStarted(profile.getJobId(), 
+                                          tip.getTIPId(), status.getTaskId(), status.getStartTime(), 
+                                          taskTrackerName); 
+      JobHistory.ReduceAttempt.logFailed(profile.getJobId(), 
+                                         tip.getTIPId(), status.getTaskId(), System.currentTimeMillis(),
+                                         taskTrackerName, status.getDiagnosticInfo()); 
+    }
         
-        // After this, try to assign tasks with the one after this, so that
-        // the failed task goes to the end of the list.
-        if (tip.isMapTask()) {
-          failedMapTasks++; 
-        } else {
-          failedReduceTasks++; 
-        }
+    // After this, try to assign tasks with the one after this, so that
+    // the failed task goes to the end of the list.
+    if (tip.isMapTask()) {
+      failedMapTasks++; 
+    } else {
+      failedReduceTasks++; 
+    }
             
-        //
-        // Note down that a task has failed on this tasktracker
-        //
-        addTrackerTaskFailure(trackerName);
+    //
+    // Note down that a task has failed on this tasktracker
+    //
+    addTrackerTaskFailure(trackerName);
         
-        //
-        // Let the JobTracker know that this task has failed
-        //
-        jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid);
-
-        //
-        // Check if we need to kill the job because of too many failures
-        //
-        if (tip.isFailed()) {
-            LOG.info("Aborting job " + profile.getJobId());
-            JobHistory.Task.logFailed(profile.getJobId(), tip.getTIPId(), 
-                tip.isMapTask() ? Values.MAP.name():Values.REDUCE.name(),  
-                System.currentTimeMillis(), status.getDiagnosticInfo());
-            JobHistory.JobInfo.logFailed(profile.getJobId(), 
-                System.currentTimeMillis(), this.finishedMapTasks, this.finishedReduceTasks);
-            kill();
-        }
-    }
-
-    /**
-     * Fail a task with a given reason, but without a status object.
-     * @author Owen O'Malley
-     * @param tip The task's tip
-     * @param taskid The task id
-     * @param reason The reason that the task failed
-     * @param trackerName The task tracker the task failed on
-     */
-    public void failedTask(TaskInProgress tip, String taskid, 
-                           String reason, TaskStatus.Phase phase, 
-                           String hostname, String trackerName,
-                           JobTrackerMetrics metrics) {
-       TaskStatus status = new TaskStatus(taskid,
-                                          tip.isMapTask(),
-                                          0.0f,
-                                          TaskStatus.State.FAILED,
-                                          reason,
-                                          reason,
-                                          trackerName, phase,
-                                          tip.getCounters());
-       updateTaskStatus(tip, status, metrics);
-       JobHistory.Task.logFailed(profile.getJobId(), tip.getTIPId(), 
-           tip.isMapTask() ? Values.MAP.name() : Values.REDUCE.name(), 
-           System.currentTimeMillis(), reason); 
-    }
+    //
+    // Let the JobTracker know that this task has failed
+    //
+    jobtracker.markCompletedTaskAttempt(status.getTaskTracker(), taskid);
+
+    //
+    // Check if we need to kill the job because of too many failures
+    //
+    if (tip.isFailed()) {
+      LOG.info("Aborting job " + profile.getJobId());
+      JobHistory.Task.logFailed(profile.getJobId(), tip.getTIPId(), 
+                                tip.isMapTask() ? Values.MAP.name():Values.REDUCE.name(),  
+                                System.currentTimeMillis(), status.getDiagnosticInfo());
+      JobHistory.JobInfo.logFailed(profile.getJobId(), 
+                                   System.currentTimeMillis(), this.finishedMapTasks, this.finishedReduceTasks);
+      kill();
+    }
+  }
+
+  /**
+   * Fail a task with a given reason, but without a status object.
+   * @author Owen O'Malley
+   * @param tip The task's tip
+   * @param taskid The task id
+   * @param reason The reason that the task failed
+   * @param trackerName The task tracker the task failed on
+   */
+  public void failedTask(TaskInProgress tip, String taskid, 
+                         String reason, TaskStatus.Phase phase, 
+                         String hostname, String trackerName,
+                         JobTrackerMetrics metrics) {
+    TaskStatus status = new TaskStatus(taskid,
+                                       tip.isMapTask(),
+                                       0.0f,
+                                       TaskStatus.State.FAILED,
+                                       reason,
+                                       reason,
+                                       trackerName, phase,
+                                       tip.getCounters());
+    updateTaskStatus(tip, status, metrics);
+    JobHistory.Task.logFailed(profile.getJobId(), tip.getTIPId(), 
+                              tip.isMapTask() ? Values.MAP.name() : Values.REDUCE.name(), 
+                              System.currentTimeMillis(), reason); 
+  }
        
                            
-    /**
-     * The job is dead.  We're now GC'ing it, getting rid of the job
-     * from all tables.  Be sure to remove all of this job's tasks
-     * from the various tables.
-     */
-    synchronized void garbageCollect() {
-      // Let the JobTracker know that a job is complete
-      jobtracker.finalizeJob(this);
+  /**
+   * The job is dead.  We're now GC'ing it, getting rid of the job
+   * from all tables.  Be sure to remove all of this job's tasks
+   * from the various tables.
+   */
+  synchronized void garbageCollect() {
+    // Let the JobTracker know that a job is complete
+    jobtracker.finalizeJob(this);
       
-      try {
-        // Definitely remove the local-disk copy of the job file
-        if (localJobFile != null) {
-            localFs.delete(localJobFile);
-            localJobFile = null;
-        }
-        if (localJarFile != null) {
-            localFs.delete(localJarFile);
-            localJarFile = null;
-        }
+    try {
+      // Definitely remove the local-disk copy of the job file
+      if (localJobFile != null) {
+        localFs.delete(localJobFile);
+        localJobFile = null;
+      }
+      if (localJarFile != null) {
+        localFs.delete(localJarFile);
+        localJarFile = null;
+      }
 
-        // JobClient always creates a new directory with job files
-        // so we remove that directory to cleanup
-        FileSystem fs = FileSystem.get(conf);
-        fs.delete(new Path(profile.getJobFile()).getParent());
+      // JobClient always creates a new directory with job files
+      // so we remove that directory to cleanup
+      FileSystem fs = FileSystem.get(conf);
+      fs.delete(new Path(profile.getJobFile()).getParent());
         
-        // Delete temp dfs dirs created if any, like in case of 
-        // speculative exn of reduces.  
-        String tempDir = conf.get("mapred.system.dir") + "/job_" + uniqueString; 
-        fs.delete(new Path(tempDir)); 
-
-      } catch (IOException e) {
-        LOG.warn("Error cleaning up "+profile.getJobId()+": "+e);
+      // Delete temp dfs dirs created if any, like in case of 
+      // speculative exn of reduces.  
+      String tempDir = conf.get("mapred.system.dir") + "/job_" + uniqueString; 
+      fs.delete(new Path(tempDir)); 
+
+    } catch (IOException e) {
+      LOG.warn("Error cleaning up "+profile.getJobId()+": "+e);
+    }
+  }
+
+  /**
+   * Return the TaskInProgress that matches the tipid.
+   */
+  public TaskInProgress getTaskInProgress(String tipid){
+    for (int i = 0; i < maps.length; i++) {
+      if (tipid.equals(maps[i].getTIPId())){
+        return maps[i];
+      }               
+    }
+    for (int i = 0; i < reduces.length; i++) {
+      if (tipid.equals(reduces[i].getTIPId())){
+        return reduces[i];
       }
     }
-
-    /**
-     * Return the TaskInProgress that matches the tipid.
-     */
-    public TaskInProgress getTaskInProgress(String tipid){
-      for (int i = 0; i < maps.length; i++) {
-        if (tipid.equals(maps[i].getTIPId())){
-          return maps[i];
-        }               
-      }
-      for (int i = 0; i < reduces.length; i++) {
-        if (tipid.equals(reduces[i].getTIPId())){
-          return reduces[i];
+    return null;
+  }
+    
+  /**
+   * Find the details of someplace where a map has finished
+   * @param mapId the id of the map
+   * @return the task status of the completed task
+   */
+  public TaskStatus findFinishedMap(int mapId) {
+    TaskInProgress tip = maps[mapId];
+    if (tip.isComplete()) {
+      TaskStatus[] statuses = tip.getTaskStatuses();
+      for(int i=0; i < statuses.length; i++) {
+        if (statuses[i].getRunState() == TaskStatus.State.SUCCEEDED) {
+          return statuses[i];
         }
       }
-      return null;
     }
+    return null;
+  }
     
-    /**
-     * Find the details of someplace where a map has finished
-     * @param mapId the id of the map
-     * @return the task status of the completed task
-     */
-    public TaskStatus findFinishedMap(int mapId) {
-       TaskInProgress tip = maps[mapId];
-       if (tip.isComplete()) {
-         TaskStatus[] statuses = tip.getTaskStatuses();
-         for(int i=0; i < statuses.length; i++) {
-           if (statuses[i].getRunState() == TaskStatus.State.SUCCEEDED) {
-             return statuses[i];
-           }
-         }
-       }
-       return null;
-    }
-    
-    synchronized public TaskCompletionEvent[] getTaskCompletionEvents(
-            int fromEventId, int maxEvents) {
-      TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY;
-      if( taskCompletionEvents.size() > fromEventId) {
-        int actualMax = Math.min(maxEvents, 
-            (taskCompletionEvents.size() - fromEventId));
-        events = (TaskCompletionEvent[])taskCompletionEvents.subList(
-            fromEventId, actualMax + fromEventId).toArray(events);        
-      }
-      return events; 
-    }
+  synchronized public TaskCompletionEvent[] getTaskCompletionEvents(
+                                                                    int fromEventId, int maxEvents) {
+    TaskCompletionEvent[] events = TaskCompletionEvent.EMPTY_ARRAY;
+    if( taskCompletionEvents.size() > fromEventId) {
+      int actualMax = Math.min(maxEvents, 
+                               (taskCompletionEvents.size() - fromEventId));
+      events = (TaskCompletionEvent[])taskCompletionEvents.subList(
+                                                                   fromEventId, actualMax + fromEventId).toArray(events);        
+    }
+    return events; 
+  }
 }

+ 85 - 85
src/java/org/apache/hadoop/mapred/JobProfile.java

@@ -30,101 +30,101 @@ import java.net.*;
  **************************************************/
 public class JobProfile implements Writable {
 
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (JobProfile.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new JobProfile(); }
-         });
-    }
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (JobProfile.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new JobProfile(); }
+       });
+  }
 
-    String user;
-    String jobid;
-    String jobFile;
-    String url;
-    String name;
+  String user;
+  String jobid;
+  String jobFile;
+  String url;
+  String name;
 
-    /**
-     * Construct an empty {@link JobProfile}.
-     */
-    public JobProfile() {
-    }
+  /**
+   * Construct an empty {@link JobProfile}.
+   */
+  public JobProfile() {
+  }
 
-    /**
-     * Construct a {@link JobProfile} the userid, jobid, 
-     * job config-file, job-details url and job name. 
-     * 
-     * @param user userid of the person who submitted the job.
-     * @param jobid id of the job.
-     * @param jobFile job configuration file. 
-     * @param url link to the web-ui for details of the job.
-     * @param name user-specified job name.
-     */
-    public JobProfile(String user, String jobid, String jobFile, String url,
-                      String name) {
-        this.user = user;
-        this.jobid = jobid;
-        this.jobFile = jobFile;
-        this.url = url;
-        this.name = name;
-    }
+  /**
+   * Construct a {@link JobProfile} with the userid, jobid, 
+   * job config-file, job-details url and job name. 
+   * 
+   * @param user userid of the person who submitted the job.
+   * @param jobid id of the job.
+   * @param jobFile job configuration file. 
+   * @param url link to the web-ui for details of the job.
+   * @param name user-specified job name.
+   */
+  public JobProfile(String user, String jobid, String jobFile, String url,
+                    String name) {
+    this.user = user;
+    this.jobid = jobid;
+    this.jobFile = jobFile;
+    this.url = url;
+    this.name = name;
+  }
 
-    /**
-     * Get the user id.
-     */
-    public String getUser() {
-      return user;
-    }
+  /**
+   * Get the user id.
+   */
+  public String getUser() {
+    return user;
+  }
     
-    /**
-     * Get the job id.
-     */
-    public String getJobId() {
-        return jobid;
-    }
+  /**
+   * Get the job id.
+   */
+  public String getJobId() {
+    return jobid;
+  }
 
-    /**
-     * Get the configuration file for the job.
-     */
-    public String getJobFile() {
-        return jobFile;
-    }
+  /**
+   * Get the configuration file for the job.
+   */
+  public String getJobFile() {
+    return jobFile;
+  }
 
-    /**
-     * Get the link to the web-ui for details of the job.
-     */
-    public URL getURL() {
-        try {
-            return new URL(url.toString());
-        } catch (IOException ie) {
-            return null;
-        }
+  /**
+   * Get the link to the web-ui for details of the job.
+   */
+  public URL getURL() {
+    try {
+      return new URL(url.toString());
+    } catch (IOException ie) {
+      return null;
     }
+  }
 
-    /**
-     * Get the user-specified job name.
-     */
-    public String getJobName() {
-      return name;
-    }
+  /**
+   * Get the user-specified job name.
+   */
+  public String getJobName() {
+    return name;
+  }
     
-    ///////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////
-    public void write(DataOutput out) throws IOException {
-        UTF8.writeString(out, jobid);
-        UTF8.writeString(out, jobFile);
-        UTF8.writeString(out, url);
-        UTF8.writeString(out, user);
-        UTF8.writeString(out, name);
-    }
-    public void readFields(DataInput in) throws IOException {
-        this.jobid = UTF8.readString(in);
-        this.jobFile = UTF8.readString(in);
-        this.url = UTF8.readString(in);
-        this.user = UTF8.readString(in);
-        this.name = UTF8.readString(in);
-    }
+  ///////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    UTF8.writeString(out, jobid);
+    UTF8.writeString(out, jobFile);
+    UTF8.writeString(out, url);
+    UTF8.writeString(out, user);
+    UTF8.writeString(out, name);
+  }
+  public void readFields(DataInput in) throws IOException {
+    this.jobid = UTF8.readString(in);
+    this.jobFile = UTF8.readString(in);
+    this.url = UTF8.readString(in);
+    this.user = UTF8.readString(in);
+    this.name = UTF8.readString(in);
+  }
 }
 
 

Some files were not shown because too many files changed in this diff