
HADOOP-1148. More re-indentation.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@529756 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
parent
commit 6c9e0de4b7
43 changed files, with 880 additions and 880 deletions
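
The hunks below are whitespace-only re-indentation: wrapped argument lists and string concatenations are aligned under the opening parenthesis of the call, wrapped boolean operators line up under the start of the condition, and a wrapped throws clause gets a plain continuation indent instead of a deeper one. As a minimal sketch of that convention (not code from the patch; the class and method names here are hypothetical):

public class IndentExample {
  // Hypothetical class, not part of the Hadoop patch; it only illustrates the
  // indentation convention the diffs below apply.

  // After re-indentation, a wrapped "throws" clause uses a plain two-space
  // continuation indent, and parameters that spill over align under the
  // opening parenthesis.
  public void put(String regionName, long clientid, long lockid,
                  String column)
    throws IllegalArgumentException {

    // Wrapped boolean operators line up under the start of the condition.
    if ((regionName != null)
        && (column != null)
        && (lockid >= 0)) {
      System.out.println(regionName + "/" + column
                         + " lock=" + lockid);
    }
  }
}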
  1. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (+12 -12)
  2. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java (+11 -11)
  3. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (+1 -1)
  4. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (+20 -20)
  5. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (+7 -7)
  6. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java (+8 -8)
  7. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (+2 -2)
  8. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (+39 -39)
  9. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (+12 -12)
  10. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (+13 -13)
  11. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (+2 -2)
  12. src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (+1 -1)
  13. src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (+38 -38)
  14. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java (+1 -1)
  15. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java (+1 -1)
  16. src/java/org/apache/hadoop/dfs/DataStorage.java (+46 -46)
  17. src/java/org/apache/hadoop/dfs/DatanodeID.java (+4 -4)
  18. src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java (+3 -3)
  19. src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java (+26 -26)
  20. src/java/org/apache/hadoop/fs/s3/S3InputStream.java (+48 -48)
  21. src/java/org/apache/hadoop/fs/s3/S3OutputStream.java (+51 -51)
  22. src/java/org/apache/hadoop/io/ObjectWritable.java (+1 -1)
  23. src/java/org/apache/hadoop/mapred/ClusterStatus.java (+1 -1)
  24. src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java (+13 -13)
  25. src/java/org/apache/hadoop/mapred/JobHistory.java (+143 -143)
  26. src/java/org/apache/hadoop/mapred/LineRecordReader.java (+3 -3)
  27. src/java/org/apache/hadoop/mapred/PhasedFileSystem.java (+46 -46)
  28. src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java (+127 -127)
  29. src/java/org/apache/hadoop/mapred/TaskLogAppender.java (+6 -6)
  30. src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java (+8 -8)
  31. src/java/org/apache/hadoop/record/compiler/CodeGenerator.java (+5 -5)
  32. src/java/org/apache/hadoop/util/DiskChecker.java (+23 -23)
  33. src/java/org/apache/hadoop/util/HostsFileReader.java (+1 -1)
  34. src/java/org/apache/hadoop/util/Progressable.java (+5 -5)
  35. src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java (+60 -60)
  36. src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java (+1 -1)
  37. src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java (+11 -11)
  38. src/test/org/apache/hadoop/fs/s3/TestINode.java (+1 -1)
  39. src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java (+1 -1)
  40. src/test/org/apache/hadoop/io/retry/TestRetryProxy.java (+10 -10)
  41. src/test/org/apache/hadoop/net/TestNetworkTopology.java (+7 -7)
  42. src/test/org/apache/hadoop/util/TestReflectionUtils.java (+59 -59)
  43. src/test/testjar/ExternalMapperReducer.java (+2 -2)

+ 12 - 12
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java

@@ -180,7 +180,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
    * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
    */
   public boolean next(HStoreKey key, TreeMap<Text, byte[]> results)
-      throws IOException {
+    throws IOException {
  
     // Find the next row label (and timestamp)
  
@@ -188,12 +188,12 @@ public abstract class HAbstractScanner implements HScannerInterface {
     long chosenTimestamp = -1;
     for(int i = 0; i < keys.length; i++) {
       while((keys[i] != null)
-          && (columnMatch(i))
-          && (keys[i].getTimestamp() <= this.timestamp)
-          && ((chosenRow == null)
-              || (keys[i].getRow().compareTo(chosenRow) < 0)
-              || ((keys[i].getRow().compareTo(chosenRow) == 0)
-                  && (keys[i].getTimestamp() > chosenTimestamp)))) {
+            && (columnMatch(i))
+            && (keys[i].getTimestamp() <= this.timestamp)
+            && ((chosenRow == null)
+                || (keys[i].getRow().compareTo(chosenRow) < 0)
+                || ((keys[i].getRow().compareTo(chosenRow) == 0)
+                    && (keys[i].getTimestamp() > chosenTimestamp)))) {
 
         chosenRow = new Text(keys[i].getRow());
         chosenTimestamp = keys[i].getTimestamp();
@@ -212,8 +212,8 @@ public abstract class HAbstractScanner implements HScannerInterface {
         // Fetch the data
         
         while((keys[i] != null)
-            && (keys[i].getRow().compareTo(chosenRow) == 0)
-            && (keys[i].getTimestamp() == chosenTimestamp)) {
+              && (keys[i].getRow().compareTo(chosenRow) == 0)
+              && (keys[i].getTimestamp() == chosenTimestamp)) {
 
           if(columnMatch(i)) {
             outbuf.reset();
@@ -235,9 +235,9 @@ public abstract class HAbstractScanner implements HScannerInterface {
         // a valid timestamp, so we're ready next time.
         
         while((keys[i] != null)
-            && ((keys[i].getRow().compareTo(chosenRow) <= 0)
-                || (keys[i].getTimestamp() > this.timestamp)
-                || (! columnMatch(i)))) {
+              && ((keys[i].getRow().compareTo(chosenRow) <= 0)
+                  || (keys[i].getTimestamp() > this.timestamp)
+                  || (! columnMatch(i)))) {
 
           getNext(i);
         }

+ 11 - 11
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java

@@ -146,8 +146,8 @@ public class HClient extends HGlobals implements HConstants {
   private void locateRootRegion() throws IOException {
     if(master == null) {
       master = (HMasterInterface)RPC.getProxy(HMasterInterface.class, 
-                   HMasterInterface.versionID,
-                   masterLocation.getInetSocketAddress(), conf);
+                                              HMasterInterface.versionID,
+                                              masterLocation.getInetSocketAddress(), conf);
     }
     
     int tries = 0;
@@ -229,7 +229,7 @@ public class HClient extends HGlobals implements HConstants {
         String serverName = new String(serverBytes, UTF8_ENCODING);
           
         tableServers.put(regionInfo.startKey, 
-            new TableInfo(regionInfo, new HServerAddress(serverName)));
+                         new TableInfo(regionInfo, new HServerAddress(serverName)));
 
         results.clear();
       }
@@ -239,16 +239,16 @@ public class HClient extends HGlobals implements HConstants {
   }
 
   public synchronized HRegionInterface getHRegionConnection(HServerAddress regionServer)
-      throws IOException {
+    throws IOException {
 
-      // See if we already have a connection
+    // See if we already have a connection
 
     HRegionInterface server = servers.get(regionServer.toString());
     
     if(server == null) {                                // Get a connection
       
       server = (HRegionInterface)RPC.waitForProxy(HRegionInterface.class, 
-          HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
+                                                  HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
       
       servers.put(regionServer.toString(), server);
     }
@@ -325,14 +325,14 @@ public class HClient extends HGlobals implements HConstants {
   public byte[] get(Text row, Text column) throws IOException {
     TableInfo info = getTableInfo(row);
     return getHRegionConnection(info.serverAddress).get(
-        info.regionInfo.regionName, row, column).get();
+                                                        info.regionInfo.regionName, row, column).get();
   }
  
   /** Get the specified number of versions of the specified row and column */
   public byte[][] get(Text row, Text column, int numVersions) throws IOException {
     TableInfo info = getTableInfo(row);
     BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
-        info.regionInfo.regionName, row, column, numVersions);
+                                                                          info.regionInfo.regionName, row, column, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
     for(int i = 0 ; i < values.length; i++) {
@@ -348,7 +348,7 @@ public class HClient extends HGlobals implements HConstants {
   public byte[][] get(Text row, Text column, long timestamp, int numVersions) throws IOException {
     TableInfo info = getTableInfo(row);
     BytesWritable[] values = getHRegionConnection(info.serverAddress).get(
-        info.regionInfo.regionName, row, column, timestamp, numVersions);
+                                                                          info.regionInfo.regionName, row, column, timestamp, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
     for(int i = 0 ; i < values.length; i++) {
@@ -361,7 +361,7 @@ public class HClient extends HGlobals implements HConstants {
   public LabelledData[] getRow(Text row) throws IOException {
     TableInfo info = getTableInfo(row);
     return getHRegionConnection(info.serverAddress).getRow(
-        info.regionInfo.regionName, row);
+                                                           info.regionInfo.regionName, row);
   }
 
   /** 
@@ -492,7 +492,7 @@ public class HClient extends HGlobals implements HConstants {
       try {
         server = getHRegionConnection(regions[currentRegion].serverAddress);
         scanner = server.openScanner(regions[currentRegion].regionInfo.regionName,
-            columns, startRow);
+                                     columns, startRow);
         
       } catch(IOException e) {
         close();

+ 1 - 1
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java

@@ -342,7 +342,7 @@ public class HLog {
     }
     
     writer.append(new HLogKey(regionName, tableName, HLog.METAROW, logSeqId),
-        new HLogEdit(HLog.METACOLUMN, HStoreKey.COMPLETE_CACHEFLUSH, System.currentTimeMillis()));
+                  new HLogEdit(HLog.METACOLUMN, HStoreKey.COMPLETE_CACHEFLUSH, System.currentTimeMillis()));
     numEntries++;
 
     // Remember the most-recent flush for each region.

+ 20 - 20
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java

@@ -104,7 +104,7 @@ public class HMaster extends HGlobals
     
     public void run() {
       Text cols[] = {
-          ROOT_COLUMN_FAMILY
+        ROOT_COLUMN_FAMILY
       };
       Text firstRow = new Text();
   
@@ -156,7 +156,7 @@ public class HMaster extends HGlobals
             synchronized(serversToServerInfo) {
               storedInfo = serversToServerInfo.get(serverName);
               if(storedInfo == null
-                  || storedInfo.getStartCode() != startCode) {
+                 || storedInfo.getStartCode() != startCode) {
               
                 // The current assignment is no good; load the region.
   
@@ -216,7 +216,7 @@ public class HMaster extends HGlobals
    */
   private class MetaScanner implements Runnable {
     private final Text cols[] = {
-        META_COLUMN_FAMILY
+      META_COLUMN_FAMILY
     };
     private final Text firstRow = new Text();
     
@@ -262,7 +262,7 @@ public class HMaster extends HGlobals
           synchronized(serversToServerInfo) {
             storedInfo = serversToServerInfo.get(serverName);
             if(storedInfo == null
-                || storedInfo.getStartCode() != startCode) {
+               || storedInfo.getStartCode() != startCode) {
             
               // The current assignment is no good; load the region.
 
@@ -370,8 +370,8 @@ public class HMaster extends HGlobals
   /** Build the HMaster out of a raw configuration item. */
   public HMaster(Configuration conf) throws IOException {
     this(new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR)),
-        new HServerAddress(conf.get(MASTER_DEFAULT_NAME)),
-        conf);
+         new HServerAddress(conf.get(MASTER_DEFAULT_NAME)),
+         conf);
   }
 
   /** 
@@ -410,9 +410,9 @@ public class HMaster extends HGlobals
     this.maxRegionOpenTime = conf.getLong("hbase.hbasemaster.maxregionopen", 30 * 1000);
     this.msgQueue = new Vector<PendingOperation>();
     this.serverLeases = new Leases(conf.getLong("hbase.master.lease.period", 15 * 1000), 
-        conf.getLong("hbase.master.lease.thread.wakefrequency", 15 * 1000));
+                                   conf.getLong("hbase.master.lease.thread.wakefrequency", 15 * 1000));
     this.server = RPC.getServer(this, address.getBindAddress(),
-        address.getPort(), conf.getInt("hbase.hregionserver.handler.count", 10), false, conf);
+                                address.getPort(), conf.getInt("hbase.hregionserver.handler.count", 10), false, conf);
     this.client = new HClient(conf);
     
     this.metaRescanInterval
@@ -714,7 +714,7 @@ public class HMaster extends HGlobals
           
         default:
           throw new IOException("Impossible state during msg processing.  Instruction: "
-              + incomingMsgs[i].getMsg());
+                                + incomingMsgs[i].getMsg());
         }
       }
 
@@ -725,13 +725,13 @@ public class HMaster extends HGlobals
         // Open new regions as necessary
 
         int targetForServer = (int) Math.ceil(unassignedRegions.size()
-            / (1.0 * serversToServerInfo.size()));
+                                              / (1.0 * serversToServerInfo.size()));
 
         int counter = 0;
         long now = System.currentTimeMillis();
 
         for(Iterator<Text> it = unassignedRegions.keySet().iterator();
-        it.hasNext(); ) {
+            it.hasNext(); ) {
 
           Text curRegionName = it.next();
           HRegionInfo regionInfo = unassignedRegions.get(curRegionName);
@@ -790,7 +790,7 @@ public class HMaster extends HGlobals
 
   abstract class PendingOperation {
     protected final Text[] columns = {
-        META_COLUMN_FAMILY
+      META_COLUMN_FAMILY
     };
     protected final Text startRow = new Text();
     protected long clientId;
@@ -813,7 +813,7 @@ public class HMaster extends HGlobals
     }
     
     private void scanMetaRegion(HRegionInterface server, HScannerInterface scanner,
-        Text regionName) throws IOException {
+                                Text regionName) throws IOException {
 
       Vector<HStoreKey> toDoList = new Vector<HStoreKey>();
       TreeMap<Text, HRegionInfo> regions = new TreeMap<Text, HRegionInfo>();
@@ -899,7 +899,7 @@ public class HMaster extends HGlobals
       
       HRegionInterface server = client.getHRegionConnection(rootRegionLocation);
       HScannerInterface scanner = server.openScanner(rootRegionInfo.regionName,
-          columns, startRow);
+                                                     columns, startRow);
       
       scanMetaRegion(server, scanner, rootRegionInfo.regionName);
       for(Iterator<MetaRegion> i = knownMetaRegions.values().iterator();
@@ -1003,10 +1003,10 @@ public class HMaster extends HGlobals
       
       try {
         this.serverAddress = new BytesWritable(
-            info.getServerAddress().toString().getBytes(UTF8_ENCODING));
+                                               info.getServerAddress().toString().getBytes(UTF8_ENCODING));
         
         this.startCode = new BytesWritable(
-            String.valueOf(info.getStartCode()).getBytes(UTF8_ENCODING));
+                                           String.valueOf(info.getStartCode()).getBytes(UTF8_ENCODING));
         
       } catch(UnsupportedEncodingException e) {
       }
@@ -1100,7 +1100,7 @@ public class HMaster extends HGlobals
     long clientId = rand.nextLong();
     long lockid = server.startUpdate(metaRegionName, clientId, regionName);
     server.put(metaRegionName, clientId, lockid, META_COL_REGIONINFO, 
-        new BytesWritable(byteValue.toByteArray()));
+               new BytesWritable(byteValue.toByteArray()));
     server.commit(metaRegionName, clientId, lockid);
     
     // 4. Get it assigned to a server
@@ -1122,14 +1122,14 @@ public class HMaster extends HGlobals
    * @throws IOException
    */
   private HRegion createNewHRegion(HTableDescriptor desc, long regionId) 
-      throws IOException {
+    throws IOException {
     
     HRegionInfo info = new HRegionInfo(regionId, desc, null, null);
     Path regionDir = HStoreFile.getHRegionDir(dir, info.regionName);
     fs.mkdirs(regionDir);
 
     return new HRegion(dir, new HLog(fs, new Path(regionDir, "log"), conf), fs,
-        conf, info, null, null);
+                       conf, info, null, null);
   }
   
   /**
@@ -1168,7 +1168,7 @@ public class HMaster extends HGlobals
   
   public void deleteTable(Text tableName) throws IOException {
     Text[] columns = {
-        META_COLUMN_FAMILY
+      META_COLUMN_FAMILY
     };
     
     // We can not access any meta region if they have not already been assigned

+ 7 - 7
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java

@@ -31,10 +31,10 @@ public class HMemcache {
   private static final Log LOG = LogFactory.getLog(HMemcache.class);
   
   TreeMap<HStoreKey, BytesWritable> memcache 
-      = new TreeMap<HStoreKey, BytesWritable>();
+    = new TreeMap<HStoreKey, BytesWritable>();
   
   Vector<TreeMap<HStoreKey, BytesWritable>> history 
-      = new Vector<TreeMap<HStoreKey, BytesWritable>>();
+    = new Vector<TreeMap<HStoreKey, BytesWritable>>();
   
   TreeMap<HStoreKey, BytesWritable> snapshot = null;
 
@@ -199,7 +199,7 @@ public class HMemcache {
   }
   
   void internalGetFull(TreeMap<HStoreKey, BytesWritable> map, HStoreKey key, 
-      TreeMap<Text, byte[]> results) {
+                       TreeMap<Text, byte[]> results) {
     
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(key);
     
@@ -208,7 +208,7 @@ public class HMemcache {
       Text itCol = itKey.getColumn();
 
       if(results.get(itCol) == null
-          && key.matchesWithoutColumn(itKey)) {
+         && key.matchesWithoutColumn(itKey)) {
         BytesWritable val = tailMap.get(itKey);
         results.put(itCol, val.get());
         
@@ -251,7 +251,7 @@ public class HMemcache {
    * Return a scanner over the keys in the HMemcache
    */
   public HScannerInterface getScanner(long timestamp, Text targetCols[], Text firstRow)
-      throws IOException {
+    throws IOException {
     
     return new HMemcacheScanner(timestamp, targetCols, firstRow);
   }
@@ -266,8 +266,8 @@ public class HMemcache {
     Iterator<HStoreKey> keyIterators[];
 
     @SuppressWarnings("unchecked")
-    public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
-        throws IOException {
+      public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
+      throws IOException {
       
       super(timestamp, targetCols);
       

+ 8 - 8
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMsg.java

@@ -67,13 +67,13 @@ public class HMsg implements Writable {
   // Writable
   //////////////////////////////////////////////////////////////////////////////
 
-   public void write(DataOutput out) throws IOException {
-     out.writeByte(msg);
-     info.write(out);
-   }
+  public void write(DataOutput out) throws IOException {
+    out.writeByte(msg);
+    info.write(out);
+  }
 
-   public void readFields(DataInput in) throws IOException {
-     this.msg = in.readByte();
-     this.info.readFields(in);
-   }
+  public void readFields(DataInput in) throws IOException {
+    this.msg = in.readByte();
+    this.info.readFields(in);
+  }
 }

+ 2 - 2
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java

@@ -38,7 +38,7 @@ public class HRegionInfo implements Writable {
   }
 
   public HRegionInfo(long regionId, HTableDescriptor tableDesc, Text startKey, 
-      Text endKey) throws IllegalArgumentException {
+                     Text endKey) throws IllegalArgumentException {
     
     this.regionId = regionId;
     
@@ -59,7 +59,7 @@ public class HRegionInfo implements Writable {
     }
     
     this.regionName = new Text(tableDesc.getName() + "_"
-        + (startKey == null ? "" : startKey.toString()) + "_" + regionId);
+                               + (startKey == null ? "" : startKey.toString()) + "_" + regionId);
   }
     
   //////////////////////////////////////////////////////////////////////////////

+ 39 - 39
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java

@@ -111,7 +111,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
             Text tableToUpdate
               = (oldRegion.find(META_TABLE_NAME.toString()) == 0)
-                ? ROOT_TABLE_NAME : META_TABLE_NAME;
+              ? ROOT_TABLE_NAME : META_TABLE_NAME;
 
             client.openTable(tableToUpdate);
             long lockid = client.startUpdate(oldRegion);
@@ -249,13 +249,13 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Start a HRegionServer at the default location */
   public HRegionServer(Configuration conf) throws IOException {
     this(new Path(conf.get(HREGION_DIR, DEFAULT_HREGION_DIR)),
-        new HServerAddress(conf.get("hbase.regionserver.default.name")),
-        conf);
+         new HServerAddress(conf.get("hbase.regionserver.default.name")),
+         conf);
   }
   
   /** Start a HRegionServer at an indicated location */
   public HRegionServer(Path regionDir, HServerAddress address, Configuration conf) 
-      throws IOException {
+    throws IOException {
     
     // Basic setup
     
@@ -302,10 +302,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       // Remote HMaster
 
       this.hbaseMaster = (HMasterRegionInterface)
-      RPC.waitForProxy(HMasterRegionInterface.class,
-          HMasterRegionInterface.versionId,
-          new HServerAddress(conf.get(MASTER_DEFAULT_NAME)).getInetSocketAddress(),
-          conf);
+        RPC.waitForProxy(HMasterRegionInterface.class,
+                         HMasterRegionInterface.versionId,
+                         new HServerAddress(conf.get(MASTER_DEFAULT_NAME)).getInetSocketAddress(),
+                         conf);
 
       // Threads
 
@@ -313,12 +313,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       this.splitCheckerThread.start();
       this.logRollerThread.start();
       this.leases = new Leases(conf.getLong("hbase.hregionserver.lease.period", 
-          3 * 60 * 1000), threadWakeFrequency);
+                                            3 * 60 * 1000), threadWakeFrequency);
       
       // Server
 
       this.server = RPC.getServer(this, address.getBindAddress().toString(), 
-          address.getPort(), conf.getInt("hbase.hregionserver.handler.count", 10), false, conf);
+                                  address.getPort(), conf.getInt("hbase.hregionserver.handler.count", 10), false, conf);
       this.server.start();
 
     } catch(IOException e) {
@@ -523,7 +523,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
 
   private void closeRegion(HRegionInfo info, boolean reportWhenCompleted)
-      throws IOException {
+    throws IOException {
     
     locking.obtainWriteLock();
     try {
@@ -580,24 +580,24 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    *        
    *        For now, we do not do merging. Splits are driven by the HRegionServer.
    ****************************************************************************/
-/*
-  private void mergeRegions(Text regionNameA, Text regionNameB) throws IOException {
+  /*
+    private void mergeRegions(Text regionNameA, Text regionNameB) throws IOException {
     locking.obtainWriteLock();
     try {
-      HRegion srcA = regions.remove(regionNameA);
-      HRegion srcB = regions.remove(regionNameB);
-      HRegion newRegion = HRegion.closeAndMerge(srcA, srcB);
-      regions.put(newRegion.getRegionName(), newRegion);
-
-      reportClose(srcA);
-      reportClose(srcB);
-      reportOpen(newRegion);
+    HRegion srcA = regions.remove(regionNameA);
+    HRegion srcB = regions.remove(regionNameB);
+    HRegion newRegion = HRegion.closeAndMerge(srcA, srcB);
+    regions.put(newRegion.getRegionName(), newRegion);
+
+    reportClose(srcA);
+    reportClose(srcB);
+    reportOpen(newRegion);
       
     } finally {
-      locking.releaseWriteLock();
+    locking.releaseWriteLock();
     }
-  }
-*/
+    }
+  */
 
   //////////////////////////////////////////////////////////////////////////////
   // HRegionInterface
@@ -614,7 +614,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
   /** Start a scanner for a given HRegion. */
   public HScannerInterface openScanner(Text regionName, Text[] cols, 
-      Text firstRow) throws IOException {
+                                       Text firstRow) throws IOException {
 
     HRegion r = getRegion(regionName);
     if(r == null) {
@@ -639,7 +639,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
   /** Get multiple versions of the indicated row/col */
   public BytesWritable[] get(Text regionName, Text row, Text column, 
-      int numVersions) throws IOException {
+                             int numVersions) throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -661,7 +661,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
   /** Get multiple timestamped versions of the indicated row/col */
   public BytesWritable[] get(Text regionName, Text row, Text column, 
-      long timestamp, int numVersions) throws IOException {
+                             long timestamp, int numVersions) throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -723,7 +723,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   }
   
   public long startUpdate(Text regionName, long clientid, Text row) 
-      throws IOException {
+    throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -732,15 +732,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     
     long lockid = region.startUpdate(row);
     leases.createLease(new Text(String.valueOf(clientid)), 
-        new Text(String.valueOf(lockid)), 
-        new RegionListener(region, lockid));
+                       new Text(String.valueOf(lockid)), 
+                       new RegionListener(region, lockid));
     
     return lockid;
   }
 
   /** Add something to the HBase. */
   public void put(Text regionName, long clientid, long lockid, Text column, 
-      BytesWritable val) throws IOException {
+                  BytesWritable val) throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -748,14 +748,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     }
     
     leases.renewLease(new Text(String.valueOf(clientid)), 
-        new Text(String.valueOf(lockid)));
+                      new Text(String.valueOf(lockid)));
     
     region.put(lockid, column, val.get());
   }
 
   /** Remove a cell from the HBase. */
   public void delete(Text regionName, long clientid, long lockid, Text column) 
-      throws IOException {
+    throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -763,14 +763,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     }
     
     leases.renewLease(new Text(String.valueOf(clientid)), 
-        new Text(String.valueOf(lockid)));
+                      new Text(String.valueOf(lockid)));
     
     region.delete(lockid, column);
   }
 
   /** Abandon the transaction */
   public void abort(Text regionName, long clientid, long lockid) 
-      throws IOException {
+    throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -778,14 +778,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     }
     
     leases.cancelLease(new Text(String.valueOf(clientid)), 
-        new Text(String.valueOf(lockid)));
+                       new Text(String.valueOf(lockid)));
     
     region.abort(lockid);
   }
 
   /** Confirm the transaction */
   public void commit(Text regionName, long clientid, long lockid) 
-      throws IOException {
+    throws IOException {
     
     HRegion region = getRegion(regionName);
     if(region == null) {
@@ -793,7 +793,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     }
     
     leases.cancelLease(new Text(String.valueOf(clientid)), 
-        new Text(String.valueOf(lockid)));
+                       new Text(String.valueOf(lockid)));
     
     region.commit(lockid);
   }
@@ -801,7 +801,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Don't let the client's lease expire just yet...  */
   public void renewLease(long lockid, long clientid) throws IOException {
     leases.renewLease(new Text(String.valueOf(clientid)), 
-        new Text(String.valueOf(lockid)));
+                      new Text(String.valueOf(lockid)));
   }
 
   /** Private utility method for safely obtaining an HRegion handle. */

+ 12 - 12
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java

@@ -88,7 +88,7 @@ public class HStore {
    * will be deleted (by whoever has instantiated the HStore).
    */
   public HStore(Path dir, Text regionName, Text colFamily, int maxVersions, 
-      FileSystem fs, Path reconstructionLog, Configuration conf) throws IOException {
+                FileSystem fs, Path reconstructionLog, Configuration conf) throws IOException {
     
     this.dir = dir;
     this.regionName = regionName;
@@ -174,7 +174,7 @@ public class HStore {
             continue;
           }
           reconstructedCache.put(new HStoreKey(key.getRow(), val.getColumn(), 
-              val.getTimestamp()), val.getVal());
+                                               val.getTimestamp()), val.getVal());
         }
         
       } finally {
@@ -252,13 +252,13 @@ public class HStore {
    * Return the entire list of HStoreFiles currently used by the HStore.
    */
   public Vector<HStoreFile> flushCache(TreeMap<HStoreKey, BytesWritable> inputCache,
-      long logCacheFlushId) throws IOException {
+                                       long logCacheFlushId) throws IOException {
     
     return flushCacheHelper(inputCache, logCacheFlushId, true);
   }
   
   Vector<HStoreFile> flushCacheHelper(TreeMap<HStoreKey, BytesWritable> inputCache,
-      long logCacheFlushId, boolean addToAvailableMaps) throws IOException {
+                                      long logCacheFlushId, boolean addToAvailableMaps) throws IOException {
     
     synchronized(flushLock) {
       LOG.debug("flushing HStore " + this.regionName + "/" + this.colFamily);
@@ -270,7 +270,7 @@ public class HStore {
       
       Path mapfile = flushedFile.getMapFilePath();
       MapFile.Writer out = new MapFile.Writer(conf, fs, mapfile.toString(), 
-          HStoreKey.class, BytesWritable.class);
+                                              HStoreKey.class, BytesWritable.class);
       
       try {
         for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext(); ) {
@@ -392,8 +392,8 @@ public class HStore {
         // Step through them, writing to the brand-new TreeMap
 
         MapFile.Writer compactedOut = new MapFile.Writer(conf, fs, 
-            compactedOutputFile.getMapFilePath().toString(), HStoreKey.class, 
-            BytesWritable.class);
+                                                         compactedOutputFile.getMapFilePath().toString(), HStoreKey.class, 
+                                                         BytesWritable.class);
         
         try {
 
@@ -464,7 +464,7 @@ public class HStore {
 
             HStoreKey sk = keys[smallestKey];
             if(lastRow.equals(sk.getRow())
-                && lastColumn.equals(sk.getColumn())) {
+               && lastColumn.equals(sk.getColumn())) {
               
               timesSeen++;
               
@@ -478,7 +478,7 @@ public class HStore {
               // Then just skip them.
 
               if(sk.getRow().getLength() != 0
-                  && sk.getColumn().getLength() != 0) {
+                 && sk.getColumn().getLength() != 0) {
                 
                 // Only write out objects which have a non-zero length key and value
 
@@ -683,7 +683,7 @@ public class HStore {
       
       mapFiles.put(orderVal, finalCompactedFile);
       maps.put(orderVal, new MapFile.Reader(fs, 
-          finalCompactedFile.getMapFilePath().toString(), conf));
+                                            finalCompactedFile.getMapFilePath().toString(), conf));
       
     } finally {
       
@@ -721,7 +721,7 @@ public class HStore {
           do {
             Text readcol = readkey.getColumn();
             if(results.get(readcol) == null
-                && key.matchesWithoutColumn(readkey)) {
+               && key.matchesWithoutColumn(readkey)) {
               results.put(new Text(readcol), readval.get());
               readval = new BytesWritable();
               
@@ -850,7 +850,7 @@ public class HStore {
    * These should be closed after the user is done with them.
    */
   public HScannerInterface getScanner(long timestamp, Text targetCols[],
-      Text firstRow) throws IOException {
+                                      Text firstRow) throws IOException {
     
     return new HStoreScanner(timestamp, targetCols, firstRow);
   }

+ 13 - 13
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java

@@ -61,7 +61,7 @@ public class HStoreFile implements HConstants, WritableComparable {
   }
   
   public HStoreFile(Configuration conf, Path dir, Text regionName, 
-      Text colFamily, long fileId) {
+                    Text colFamily, long fileId) {
     
     this.conf = conf;
     this.dir = dir;
@@ -92,12 +92,12 @@ public class HStoreFile implements HConstants, WritableComparable {
   
   public Path getMapFilePath() {
     return new Path(HStoreFile.getMapDir(dir, regionName, colFamily), 
-        HSTORE_DATFILE_PREFIX + fileId);
+                    HSTORE_DATFILE_PREFIX + fileId);
   }
   
   public Path getInfoFilePath() {
     return new Path(HStoreFile.getInfoDir(dir, regionName, colFamily), 
-        HSTORE_INFOFILE_PREFIX + fileId);
+                    HSTORE_INFOFILE_PREFIX + fileId);
   }
 
   // Static methods to build partial paths to internal directories.  Useful for 
@@ -105,17 +105,17 @@ public class HStoreFile implements HConstants, WritableComparable {
   
   public static Path getMapDir(Path dir, Text regionName, Text colFamily) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName, 
-        new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
+                                  new Path(colFamily.toString(), HSTORE_DATFILE_DIR)));
   }
 
   public static Path getInfoDir(Path dir, Text regionName, Text colFamily) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName, 
-        new Path(colFamily.toString(), HSTORE_INFO_DIR)));
+                                  new Path(colFamily.toString(), HSTORE_INFO_DIR)));
   }
 
   public static Path getHStoreDir(Path dir, Text regionName, Text colFamily) {
     return new Path(dir, new Path(HREGIONDIR_PREFIX + regionName, 
-        colFamily.toString()));
+                                  colFamily.toString()));
   }
 
   public static Path getHRegionDir(Path dir, Text regionName) {
@@ -127,7 +127,7 @@ public class HStoreFile implements HConstants, WritableComparable {
    * filesystem if the file already exists.
    */
   static HStoreFile obtainNewHStoreFile(Configuration conf, Path dir, 
-      Text regionName, Text colFamily, FileSystem fs) throws IOException {
+                                        Text regionName, Text colFamily, FileSystem fs) throws IOException {
     
     Path mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
     long fileId = Math.abs(rand.nextLong());
@@ -149,7 +149,7 @@ public class HStoreFile implements HConstants, WritableComparable {
    * If only one exists, we'll delete it.
    */
   static Vector<HStoreFile> loadHStoreFiles(Configuration conf, Path dir, 
-      Text regionName, Text colFamily, FileSystem fs) throws IOException {
+                                            Text regionName, Text colFamily, FileSystem fs) throws IOException {
     
     Vector<HStoreFile> results = new Vector<HStoreFile>();
     Path mapdir = HStoreFile.getMapDir(dir, regionName, colFamily);
@@ -200,18 +200,18 @@ public class HStoreFile implements HConstants, WritableComparable {
    * brand-new HRegions.
    */
   public void splitStoreFile(Text midKey, HStoreFile dstA, HStoreFile dstB,
-      FileSystem fs, Configuration conf) throws IOException {
+                             FileSystem fs, Configuration conf) throws IOException {
 
     // Copy the appropriate tuples to one MapFile or the other.
 
     MapFile.Reader in = new MapFile.Reader(fs, getMapFilePath().toString(), conf);
     try {
       MapFile.Writer outA = new MapFile.Writer(conf, fs, 
-          dstA.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
+                                               dstA.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
       
       try {
         MapFile.Writer outB = new MapFile.Writer(conf, fs, 
-            dstB.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
+                                                 dstB.getMapFilePath().toString(), HStoreKey.class, BytesWritable.class);
         
         try {
           HStoreKey readkey = new HStoreKey();
@@ -252,12 +252,12 @@ public class HStoreFile implements HConstants, WritableComparable {
    * We are merging multiple regions into a single new one.
    */
   public void mergeStoreFiles(Vector<HStoreFile> srcFiles, FileSystem fs, 
-      Configuration conf) throws IOException {
+                              Configuration conf) throws IOException {
 
     // Copy all the source MapFile tuples into this HSF's MapFile
 
     MapFile.Writer out = new MapFile.Writer(conf, fs, getMapFilePath().toString(),
-        HStoreKey.class, BytesWritable.class);
+                                            HStoreKey.class, BytesWritable.class);
     
     try {
       for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {

+ 2 - 2
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java

@@ -95,7 +95,7 @@ public class HStoreKey implements WritableComparable {
   
   public boolean matchesRowCol(HStoreKey other) {
     if(this.row.compareTo(other.row) == 0 &&
-        this.column.compareTo(other.column) == 0) {
+       this.column.compareTo(other.column) == 0) {
       return true;
       
     } else {
@@ -105,7 +105,7 @@ public class HStoreKey implements WritableComparable {
   
   public boolean matchesWithoutColumn(HStoreKey other) {
     if((this.row.compareTo(other.row) == 0) &&
-        (this.timestamp >= other.getTimestamp())) {
+       (this.timestamp >= other.getTimestamp())) {
       return true;
       
     } else {

+ 1 - 1
src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java

@@ -137,7 +137,7 @@ public class Leases {
           synchronized(sortedLeases) {
             Lease top;
             while((sortedLeases.size() > 0)
-                && ((top = sortedLeases.first()) != null)) {
+                  && ((top = sortedLeases.first()) != null)) {
               
               if(top.shouldExpire()) {
                 leases.remove(top.getLeaseId());

+ 38 - 38
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java

@@ -103,7 +103,7 @@ public class TestHRegion extends TestCase {
         rootLogger.setLevel(Level.WARN);
         
         PatternLayout consoleLayout
-            = (PatternLayout)rootLogger.getAppender("console").getLayout();
+          = (PatternLayout)rootLogger.getAppender("console").getLayout();
         consoleLayout.setConversionPattern("%d %-5p [%t] %l: %m%n");
       
         Logger.getLogger("org.apache.hadoop.hbase").setLevel(Environment.logLevel);
@@ -121,7 +121,7 @@ public class TestHRegion extends TestCase {
       desc.addFamily(new Text("contents"));
       desc.addFamily(new Text("anchor"));
       region = new HRegion(parentdir, log, fs, conf, 
-          new HRegionInfo(1, desc, null, null), null, oldlogfile);
+                           new HRegionInfo(1, desc, null, null), null, oldlogfile);
       
     } catch(IOException e) {
       failures = true;
@@ -160,27 +160,27 @@ public class TestHRegion extends TestCase {
         String bodystr = new String(bodydata).toString().trim();
         String teststr = CONTENTSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + CONTENTS_BASIC
-            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-            bodystr, teststr);
+                     + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+                     bodystr, teststr);
         collabel = new Text(ANCHORNUM + k);
         bodydata = region.get(rowlabel, collabel);
         bodystr = new String(bodydata).toString().trim();
         teststr = ANCHORSTR + k;
         assertEquals("Incorrect value for key: (" + rowlabel + "," + collabel
-            + "), expected: '" + teststr + "' got: '" + bodystr + "'",
-            bodystr, teststr);
-/*
+                     + "), expected: '" + teststr + "' got: '" + bodystr + "'",
+                     bodystr, teststr);
+        /*
         // Check to make sure that null values are actually null
         for (int j = 0; j < Math.min(15, NUM_VALS); j++) {
-          if (k != j) {
-            collabel = new Text(ANCHORNUM + j);
-            byte results[] = region.get(rowlabel, collabel);
-            if (results != null) {
-              throw new IOException("Found incorrect value at [" + rowlabel + ", " + collabel + "] == " + new String(results).toString().trim());
-            }
-          }
+        if (k != j) {
+        collabel = new Text(ANCHORNUM + j);
+        byte results[] = region.get(rowlabel, collabel);
+        if (results != null) {
+        throw new IOException("Found incorrect value at [" + rowlabel + ", " + collabel + "] == " + new String(results).toString().trim());
+        }
         }
-*/
+        }
+        */
       }
     } catch(IOException e) {
       failures = true;
@@ -196,8 +196,8 @@ public class TestHRegion extends TestCase {
     }
 
     Text cols[] = new Text[] {
-        CONTENTS_FIRSTCOL,
-        ANCHOR_SECONDCOL
+      CONTENTS_FIRSTCOL,
+      ANCHOR_SECONDCOL
     };
 
     // Test the Scanner!!!
@@ -233,8 +233,8 @@ public class TestHRegion extends TestCase {
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                  + ", Value for " + col + " should be: " + k
-                  + ", but was fetched as: " + curval, k, curval);
+                           + ", Value for " + col + " should be: " + k
+                           + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -266,8 +266,8 @@ public class TestHRegion extends TestCase {
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                  + ", Value for " + col + " should be: " + k
-                  + ", but was fetched as: " + curval, k, curval);
+                           + ", Value for " + col + " should be: " + k
+                           + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -307,8 +307,8 @@ public class TestHRegion extends TestCase {
           for(int j = 0; j < cols.length; j++) {
             if(col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                  + ", Value for " + col + " should be: " + k
-                  + ", but was fetched as: " + curval, k, curval);
+                           + ", Value for " + col + " should be: " + k
+                           + ", but was fetched as: " + curval, k, curval);
               numFetched++;
             }
           }
@@ -340,7 +340,7 @@ public class TestHRegion extends TestCase {
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
-                  + ", but was fetched as: " + curval, curval, k);
+                           + ", but was fetched as: " + curval, curval, k);
               numFetched++;
             }
           }
@@ -370,7 +370,7 @@ public class TestHRegion extends TestCase {
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
-                  + ", but was fetched as: " + curval, curval, k);
+                           + ", but was fetched as: " + curval, curval, k);
               numFetched++;
             }
           }
@@ -511,8 +511,8 @@ public class TestHRegion extends TestCase {
     // First verify the data written by testBasic()
 
     Text[] cols = new Text[] {
-        new Text(ANCHORNUM + "[0-9]+"),
-        new Text(CONTENTS_BASIC)
+      new Text(ANCHORNUM + "[0-9]+"),
+      new Text(CONTENTS_BASIC)
     };
     
     HScannerInterface s = region.getScanner(cols, new Text());
@@ -532,16 +532,16 @@ public class TestHRegion extends TestCase {
 
           if(col.compareTo(CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                + ", Value for " + col + " should start with: " + CONTENTSTR
-                + ", but was fetched as: " + curval,
-                curval.startsWith(CONTENTSTR));
+                       + ", Value for " + col + " should start with: " + CONTENTSTR
+                       + ", but was fetched as: " + curval,
+                       curval.startsWith(CONTENTSTR));
             contentsFetched++;
             
           } else if(col.toString().startsWith(ANCHORNUM)) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
-                + ", Value for " + col + " should start with: " + ANCHORSTR
-                + ", but was fetched as: " + curval,
-                curval.startsWith(ANCHORSTR));
+                       + ", Value for " + col + " should start with: " + ANCHORSTR
+                       + ", but was fetched as: " + curval,
+                       curval.startsWith(ANCHORSTR));
             anchorFetched++;
             
           } else {
@@ -561,8 +561,8 @@ public class TestHRegion extends TestCase {
     // Verify testScan data
     
     cols = new Text[] {
-        CONTENTS_FIRSTCOL,
-        ANCHOR_SECONDCOL
+      CONTENTS_FIRSTCOL,
+      ANCHOR_SECONDCOL
     };
 
     s = region.getScanner(cols, new Text());
@@ -580,7 +580,7 @@ public class TestHRegion extends TestCase {
           for (int j = 0; j < cols.length; j++) {
             if (col.compareTo(cols[j]) == 0) {
               assertEquals("Value for " + col + " should be: " + k
-                  + ", but was fetched as: " + curval, curval, k);
+                           + ", but was fetched as: " + curval, curval, k);
               numFetched++;
             }
           }
@@ -625,7 +625,7 @@ public class TestHRegion extends TestCase {
     // Test a scanner which only specifies the column family name
     
     cols = new Text[] {
-        new Text("anchor:")
+      new Text("anchor:")
     };
     
     s = region.getScanner(cols, new Text());
@@ -672,5 +672,5 @@ public class TestHRegion extends TestCase {
     
     deleteFile(new File(System.getProperty("test.build.data"), "dfs"));
     
-    }
+  }
 }

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java

@@ -74,7 +74,7 @@ public class PipeMapper extends PipeMapRed implements Mapper {
       mapRedFinished();
       throw new IOException ("MROutput/MRErrThread failed:"
                              + StringUtils.stringifyException(
-                                          outerrThreadsThrowable));
+                                                              outerrThreadsThrowable));
     }
     try {
       // 1/4 Hadoop in

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java

@@ -78,7 +78,7 @@ public class PipeReducer extends PipeMapRed implements Reducer {
             mapRedFinished();
             throw new IOException ("MROutput/MRErrThread failed:"
                                    + StringUtils.stringifyException( 
-                                               outerrThreadsThrowable));
+                                                                    outerrThreadsThrowable));
           }
           write(key);
           clientOut_.write('\t');

+ 46 - 46
src/java/org/apache/hadoop/dfs/DataStorage.java

@@ -65,7 +65,7 @@ class DataStorage extends Storage {
   void recoverTransitionRead( NamespaceInfo nsInfo,
                               Collection<File> dataDirs,
                               StartupOption startOpt
-                            ) throws IOException {
+                              ) throws IOException {
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
     
@@ -112,7 +112,7 @@ class DataStorage extends Storage {
 
     if( dataDirs.size() == 0 )  // none of the data dirs exist
       throw new IOException( 
-          "All specified directories are not accessible or do not exist." );
+                            "All specified directories are not accessible or do not exist." );
 
     // 2. Do transitions
     // Each storage directory is treated individually.
@@ -141,21 +141,21 @@ class DataStorage extends Storage {
 
   protected void setFields( Properties props, 
                             StorageDirectory sd 
-                          ) throws IOException {
+                            ) throws IOException {
     super.setFields( props, sd );
     props.setProperty( "storageID", storageID );
   }
 
   protected void getFields( Properties props, 
                             StorageDirectory sd 
-                          ) throws IOException {
+                            ) throws IOException {
     super.getFields( props, sd );
     String ssid = props.getProperty( "storageID" );
     if( ssid == null ||
         ! ("".equals( storageID ) || "".equals( ssid ) ||
-            storageID.equals( ssid )))
+           storageID.equals( ssid )))
       throw new InconsistentFSStateException( sd.root,
-                  "has incompatible storage Id." );
+                                              "has incompatible storage Id." );
     if( "".equals( storageID ) ) // update id only if it was empty
       storageID = ssid;
   }
@@ -168,13 +168,13 @@ class DataStorage extends Storage {
     File oldDataDir = new File( sd.root, "data" );
     if( ! oldDataDir.exists() ) 
       throw new InconsistentFSStateException( sd.root,
-          "Old layout block directory " + oldDataDir + " is missing" ); 
+                                              "Old layout block directory " + oldDataDir + " is missing" ); 
     if( ! oldDataDir.isDirectory() )
       throw new InconsistentFSStateException( sd.root,
-          oldDataDir + " is not a directory." );
+                                              oldDataDir + " is not a directory." );
     if( ! oldDataDir.canWrite() )
       throw new InconsistentFSStateException( sd.root,
-          oldDataDir + " is not writable." );
+                                              oldDataDir + " is not writable." );
     return true;
   }
   
@@ -187,7 +187,7 @@ class DataStorage extends Storage {
    */
   private void convertLayout( StorageDirectory sd,
                               NamespaceInfo nsInfo 
-                            ) throws IOException {
+                              ) throws IOException {
     assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
       "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
     File oldF = new File( sd.root, "storage" );
@@ -195,8 +195,8 @@ class DataStorage extends Storage {
     assert oldF.exists() : "Old datanode layout \"storage\" file is missing";
     assert oldDataDir.exists() : "Old layout block directory \"data\" is missing";
     LOG.info( "Old layout version file " + oldF
-            + " is found. New layout version is "
-            + FSConstants.LAYOUT_VERSION );
+              + " is found. New layout version is "
+              + FSConstants.LAYOUT_VERSION );
     LOG.info( "Converting ..." );
     
     // Lock and Read old storage file
@@ -211,7 +211,7 @@ class DataStorage extends Storage {
       int odlVersion = oldFile.readInt();
       if( odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION )
         throw new IncorrectVersionException( odlVersion, "file " + oldF,
-                                              LAST_PRE_UPGRADE_LAYOUT_VERSION );
+                                             LAST_PRE_UPGRADE_LAYOUT_VERSION );
       String odlStorageID = org.apache.hadoop.io.UTF8.readString( oldFile );
   
       // check new storage
@@ -255,7 +255,7 @@ class DataStorage extends Storage {
   private void doTransition(  StorageDirectory sd, 
                               NamespaceInfo nsInfo, 
                               StartupOption startOpt
-                            ) throws IOException {
+                              ) throws IOException {
     if( startOpt == StartupOption.ROLLBACK )
       doRollback( sd, nsInfo ); // rollback if applicable
     sd.read();
@@ -263,9 +263,9 @@ class DataStorage extends Storage {
       "Future version is not allowed";
     if( getNamespaceID() != nsInfo.getNamespaceID() )
       throw new IOException( 
-          "Incompatible namespaceIDs in " + sd.root.getCanonicalPath()
-          + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
-          + "; datanode namespaceID = " + getNamespaceID() );
+                            "Incompatible namespaceIDs in " + sd.root.getCanonicalPath()
+                            + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
+                            + "; datanode namespaceID = " + getNamespaceID() );
     if( this.layoutVersion == FSConstants.LAYOUT_VERSION 
         && this.cTime == nsInfo.getCTime() )
       return; // regular startup
@@ -292,12 +292,12 @@ class DataStorage extends Storage {
    */
   void doUpgrade( StorageDirectory sd,
                   NamespaceInfo nsInfo
-                ) throws IOException {
+                  ) throws IOException {
     LOG.info( "Upgrading storage directory " + sd.root 
-            + ".\n   old LV = " + this.getLayoutVersion()
-            + "; old CTime = " + this.getCTime()
-            + ".\n   new LV = " + nsInfo.getLayoutVersion()
-            + "; new CTime = " + nsInfo.getCTime() );
+              + ".\n   old LV = " + this.getLayoutVersion()
+              + "; old CTime = " + this.getCTime()
+              + ".\n   new LV = " + nsInfo.getLayoutVersion()
+              + "; new CTime = " + nsInfo.getCTime() );
     File curDir = sd.getCurrentDir();
     File prevDir = sd.getPreviousDir();
     assert curDir.exists() : "Current directory must exist.";
@@ -323,7 +323,7 @@ class DataStorage extends Storage {
 
   void doRollback(  StorageDirectory sd,
                     NamespaceInfo nsInfo
-                  ) throws IOException {
+                    ) throws IOException {
     File prevDir = sd.getPreviousDir();
     // regular startup if previous dir does not exist
     if( ! prevDir.exists() )
@@ -335,15 +335,15 @@ class DataStorage extends Storage {
     // We allow rollback to a state, which is either consistent with
     // the namespace state or can be further upgraded to it.
     if( ! ( prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
-        && prevInfo.getCTime() <= nsInfo.getCTime() ))  // cannot rollback
+            && prevInfo.getCTime() <= nsInfo.getCTime() ))  // cannot rollback
       throw new InconsistentFSStateException( prevSD.root,
-          "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
-          + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
-          + " is newer than the namespace state: LV = "
-          + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime() );
+                                              "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
+                                              + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
+                                              + " is newer than the namespace state: LV = "
+                                              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime() );
     LOG.info( "Rolling back storage directory " + sd.root 
-        + ".\n   target LV = " + nsInfo.getLayoutVersion()
-        + "; target CTime = " + nsInfo.getCTime() );
+              + ".\n   target LV = " + nsInfo.getLayoutVersion()
+              + "; target CTime = " + nsInfo.getCTime() );
     File tmpDir = sd.getRemovedTmp();
     assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
     // rename current to tmp
@@ -363,9 +363,9 @@ class DataStorage extends Storage {
       return; // already discarded
     final String dataDirPath = sd.root.getCanonicalPath();
     LOG.info( "Finalizing upgrade for storage directory " 
-            + dataDirPath 
-            + ".\n   cur LV = " + this.getLayoutVersion()
-            + "; cur CTime = " + this.getCTime() );
+              + dataDirPath 
+              + ".\n   cur LV = " + this.getLayoutVersion()
+              + "; cur CTime = " + this.getCTime() );
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();
     // rename previous to tmp
@@ -373,16 +373,16 @@ class DataStorage extends Storage {
 
     // delete tmp dir in a separate thread
     new Daemon( new Runnable() {
-      public void run() {
-        try {
-          deleteDir( tmpDir );
-        } catch( IOException ex ) {
-          LOG.error( "Finalize upgrade for " + dataDirPath + " failed.", ex );
+        public void run() {
+          try {
+            deleteDir( tmpDir );
+          } catch( IOException ex ) {
+            LOG.error( "Finalize upgrade for " + dataDirPath + " failed.", ex );
+          }
+          LOG.info( "Finalize upgrade for " + dataDirPath + " is complete." );
         }
-        LOG.info( "Finalize upgrade for " + dataDirPath + " is complete." );
-      }
-      public String toString() { return "Finalize " + dataDirPath; }
-    }).start();
+        public String toString() { return "Finalize " + dataDirPath; }
+      }).start();
   }
   
   void finalizeUpgrade() throws IOException {
@@ -400,11 +400,11 @@ class DataStorage extends Storage {
     if( ! to.mkdir() )
       throw new IOException("Cannot create directory " + to );
     String[] blockNames = from.list( new java.io.FilenameFilter() {
-      public boolean accept(File dir, String name) {
-        return name.startsWith( BLOCK_SUBDIR_PREFIX ) 
+        public boolean accept(File dir, String name) {
+          return name.startsWith( BLOCK_SUBDIR_PREFIX ) 
             || name.startsWith( BLOCK_FILE_PREFIX );
-      }
-    });
+        }
+      });
     
     for( int i = 0; i < blockNames.length; i++ )
       linkBlocks( new File(from, blockNames[i]), new File(to, blockNames[i]) );
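
The DataStorage hunks above change indentation only: wrapped log and exception messages are re-aligned under their first operand, and the anonymous Runnable and FilenameFilter bodies move one level past the statement that creates them. As a quick reference for that convention, here is a small self-contained sketch in the same style; the class and method names are illustrative, not Hadoop's.

import java.io.File;
import java.io.FilenameFilter;

// Illustrative class; not part of Hadoop. It only demonstrates the
// continuation and anonymous-inner-class indentation applied above.
class IndentationSketch {

  // Wrapped message: continuation lines line up under the first operand,
  // matching the re-aligned LOG.info calls in the hunks above.
  static String upgradeMessage(File root, int oldLv, int newLv) {
    return "Upgrading storage directory " + root
           + ".\n   old LV = " + oldLv
           + ".\n   new LV = " + newLv;
  }

  // Anonymous inner class indented one level past the statement that
  // creates it, as with the FilenameFilter for block files above.
  static String[] listBlockFiles(File dir, final String prefix) {
    return dir.list(new FilenameFilter() {
        public boolean accept(File d, String name) {
          return name.startsWith(prefix);
        }
      });
  }

  // Background cleanup through an anonymous Runnable, in the spirit of the
  // "delete tmp dir in a separate thread" block above (non-recursive here).
  static void deleteInBackground(final File tmpDir) {
    new Thread(new Runnable() {
        public void run() {
          File[] children = tmpDir.listFiles();
          if (children != null) {
            for (File f : children) {
              f.delete();
            }
          }
          tmpDir.delete();
        }
        public String toString() { return "Delete " + tmpDir; }
      }).start();
  }
}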

+ 4 - 4
src/java/org/apache/hadoop/dfs/DatanodeID.java

@@ -98,7 +98,7 @@ public class DatanodeID implements WritableComparable {
 
   public boolean equals( Object to ) {
     return (name.equals(((DatanodeID)to).getName()) &&
-        storageID.equals(((DatanodeID)to).getStorageID()));
+            storageID.equals(((DatanodeID)to).getStorageID()));
   }
   
   public int hashCode() {
@@ -114,9 +114,9 @@ public class DatanodeID implements WritableComparable {
    * Note that this does not update storageID.
    */
   void updateRegInfo( DatanodeID nodeReg ) {
-      name = nodeReg.getName();
-      infoPort = nodeReg.getInfoPort();
-      // update any more fields added in future.
+    name = nodeReg.getName();
+    infoPort = nodeReg.getInfoPort();
+    // update any more fields added in future.
   }
     
   /** Comparable.
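
Again a pure continuation-style change: the second operand of equals and the body of updateRegInfo move, behaviour does not. A hypothetical value class written the same way, for comparison:

// Hypothetical value class; not DatanodeID itself.
class EndpointId {
  private final String name;       // host:port
  private final String storageId;

  EndpointId(String name, String storageId) {
    this.name = name;
    this.storageId = storageId;
  }

  @Override
  public boolean equals(Object to) {
    if (!(to instanceof EndpointId)) {
      return false;
    }
    // The wrapped operand lines up under the first one, as in the hunk above.
    return (name.equals(((EndpointId) to).name) &&
            storageId.equals(((EndpointId) to).storageId));
  }

  @Override
  public int hashCode() {
    return name.hashCode() ^ storageId.hashCode();
  }
}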

+ 3 - 3
src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java

@@ -18,8 +18,8 @@ class UnregisteredDatanodeException extends IOException {
   public UnregisteredDatanodeException( DatanodeID nodeID, 
                                         DatanodeInfo storedNode ) {
     super("Data node " + nodeID.getName() 
-        + " is attempting to report storage ID "
-        + nodeID.getStorageID() + ". Node " 
-        + storedNode.getName() + " is expected to serve this storage.");
+          + " is attempting to report storage ID "
+          + nodeID.getStorageID() + ". Node " 
+          + storedNode.getName() + " is expected to serve this storage.");
   }
 }
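
This file is a single re-aligned super(...) message. The same wrapping rule, applied to an illustrative (non-Hadoop) exception subclass:

import java.io.IOException;

// Illustrative subclass; not the Hadoop exception. Continuation lines of the
// wrapped message align under the first argument of super(...).
class MismatchedStorageException extends IOException {
  MismatchedStorageException(String nodeName, String storageId, String expectedNode) {
    super("Data node " + nodeName
          + " is attempting to report storage ID "
          + storageId + ". Node "
          + expectedNode + " is expected to serve this storage.");
  }
}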

+ 26 - 26
src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java

@@ -48,38 +48,38 @@ class Jets3tFileSystemStore implements FileSystemStore {
       String secretAccessKey = null;
       String userInfo = uri.getUserInfo();
       if (userInfo != null) {
-          int index = userInfo.indexOf(':');
-          if (index != -1) {
-	          accessKey = userInfo.substring(0, index);
-	          secretAccessKey = userInfo.substring(index + 1);
-          } else {
-        	  accessKey = userInfo;
-          }
+        int index = userInfo.indexOf(':');
+        if (index != -1) {
+          accessKey = userInfo.substring(0, index);
+          secretAccessKey = userInfo.substring(index + 1);
+        } else {
+          accessKey = userInfo;
+        }
       }
       if (accessKey == null) {
-    	  accessKey = conf.get("fs.s3.awsAccessKeyId");
+        accessKey = conf.get("fs.s3.awsAccessKeyId");
       }
       if (secretAccessKey == null) {
-    	  secretAccessKey = conf.get("fs.s3.awsSecretAccessKey");
+        secretAccessKey = conf.get("fs.s3.awsSecretAccessKey");
       }
       if (accessKey == null && secretAccessKey == null) {
-    	  throw new IllegalArgumentException("AWS " +
-    	  		"Access Key ID and Secret Access Key " +
-    	  		"must be specified as the username " +
-    	  		"or password (respectively) of a s3 URL, " +
-    	  		"or by setting the " +
-	  		    "fs.s3.awsAccessKeyId or " +    	  		
-    	  		"fs.s3.awsSecretAccessKey properties (respectively).");
+        throw new IllegalArgumentException("AWS " +
+                                           "Access Key ID and Secret Access Key " +
+                                           "must be specified as the username " +
+                                           "or password (respectively) of a s3 URL, " +
+                                           "or by setting the " +
+                                           "fs.s3.awsAccessKeyId or " +    	  		
+                                           "fs.s3.awsSecretAccessKey properties (respectively).");
       } else if (accessKey == null) {
-    	  throw new IllegalArgumentException("AWS " +
-      	  		"Access Key ID must be specified " +
-      	  		"as the username of a s3 URL, or by setting the " +
-      	  		"fs.s3.awsAccessKeyId property.");
+        throw new IllegalArgumentException("AWS " +
+                                           "Access Key ID must be specified " +
+                                           "as the username of a s3 URL, or by setting the " +
+                                           "fs.s3.awsAccessKeyId property.");
       } else if (secretAccessKey == null) {
-    	  throw new IllegalArgumentException("AWS " +
-    	  		"Secret Access Key must be specified " +
-    	  		"as the password of a s3 URL, or by setting the " +
-    	  		"fs.s3.awsSecretAccessKey property.");    	  
+        throw new IllegalArgumentException("AWS " +
+                                           "Secret Access Key must be specified " +
+                                           "as the password of a s3 URL, or by setting the " +
+                                           "fs.s3.awsSecretAccessKey property.");    	  
       }
       AWSCredentials awsCredentials = new AWSCredentials(accessKey, secretAccessKey);
       this.s3Service = new RestS3Service(awsCredentials);
@@ -162,7 +162,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
   private InputStream get(String key, long byteRangeStart) throws IOException {
     try {
       S3Object object = s3Service.getObject(bucket, key, null, null, null,
-          null, byteRangeStart, null);
+                                            null, byteRangeStart, null);
       return object.getDataInputStream();
     } catch (S3ServiceException e) {
       if (e.getS3ErrorCode().equals("NoSuchKey")) {
@@ -180,7 +180,7 @@ class Jets3tFileSystemStore implements FileSystemStore {
   }
 
   public File retrieveBlock(Block block, long byteRangeStart)
-      throws IOException {
+    throws IOException {
     File fileBlock = null;
     InputStream in = null;
     OutputStream out = null;
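
Beyond indentation (tabs replaced with spaces, wrapped arguments re-aligned), these hunks make the credential lookup easy to read: the access key and secret come from the user-info part of the s3 URI first, then from the fs.s3.awsAccessKeyId and fs.s3.awsSecretAccessKey properties. A minimal standalone sketch of that lookup order, using plain java.net.URI with a Map standing in for the Hadoop Configuration, and the three error messages collapsed into one for brevity:

import java.net.URI;
import java.util.Map;

class S3CredentialSketch {
  // Standalone sketch; Map stands in for the Hadoop Configuration here.
  // Resolution order matches the hunk: URI user-info first, then properties.
  static String[] resolve(URI uri, Map<String, String> conf) {
    String accessKey = null;
    String secretAccessKey = null;
    String userInfo = uri.getUserInfo();
    if (userInfo != null) {
      int index = userInfo.indexOf(':');
      if (index != -1) {
        accessKey = userInfo.substring(0, index);
        secretAccessKey = userInfo.substring(index + 1);
      } else {
        accessKey = userInfo;
      }
    }
    if (accessKey == null) {
      accessKey = conf.get("fs.s3.awsAccessKeyId");
    }
    if (secretAccessKey == null) {
      secretAccessKey = conf.get("fs.s3.awsSecretAccessKey");
    }
    if (accessKey == null || secretAccessKey == null) {
      throw new IllegalArgumentException("AWS Access Key ID and Secret Access Key "
                                         + "must be given in the s3 URI or via the "
                                         + "fs.s3.awsAccessKeyId / fs.s3.awsSecretAccessKey properties.");
    }
    return new String[] { accessKey, secretAccessKey };
  }
}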

+ 48 - 48
src/java/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -27,7 +27,7 @@ class S3InputStream extends FSInputStream {
   private long blockEnd = -1;
 
   public S3InputStream(Configuration conf, FileSystemStore store,
-      INode inode) {
+                       INode inode) {
     
     this.store = store;
     this.blocks = inode.getBlocks();
@@ -37,65 +37,65 @@ class S3InputStream extends FSInputStream {
   }
 
   @Override
-  public synchronized long getPos() throws IOException {
-    return pos;
-  }
+    public synchronized long getPos() throws IOException {
+      return pos;
+    }
 
   @Override
-  public synchronized int available() throws IOException {
-    return (int) (fileLength - pos);
-  }
+    public synchronized int available() throws IOException {
+      return (int) (fileLength - pos);
+    }
 
   @Override
-  public synchronized void seek(long targetPos) throws IOException {
-    if (targetPos > fileLength) {
-      throw new IOException("Cannot seek after EOF");
+    public synchronized void seek(long targetPos) throws IOException {
+      if (targetPos > fileLength) {
+        throw new IOException("Cannot seek after EOF");
+      }
+      pos = targetPos;
+      blockEnd = -1;
     }
-    pos = targetPos;
-    blockEnd = -1;
-  }
 
   @Override
-  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-    return false;
-  }
+    public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+      return false;
+    }
 
   @Override
-  public synchronized int read() throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-    int result = -1;
-    if (pos < fileLength) {
-      if (pos > blockEnd) {
-        blockSeekTo(pos);
+    public synchronized int read() throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
       }
-      result = blockStream.read();
-      if (result >= 0) {
-        pos++;
+      int result = -1;
+      if (pos < fileLength) {
+        if (pos > blockEnd) {
+          blockSeekTo(pos);
+        }
+        result = blockStream.read();
+        if (result >= 0) {
+          pos++;
+        }
       }
+      return result;
     }
-    return result;
-  }
 
   @Override
-  public synchronized int read(byte buf[], int off, int len) throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-    if (pos < fileLength) {
-      if (pos > blockEnd) {
-        blockSeekTo(pos);
+    public synchronized int read(byte buf[], int off, int len) throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
       }
-      int realLen = Math.min(len, (int) (blockEnd - pos + 1));
-      int result = blockStream.read(buf, off, realLen);
-      if (result >= 0) {
-        pos += result;
+      if (pos < fileLength) {
+        if (pos > blockEnd) {
+          blockSeekTo(pos);
+        }
+        int realLen = Math.min(len, (int) (blockEnd - pos + 1));
+        int result = blockStream.read(buf, off, realLen);
+        if (result >= 0) {
+          pos += result;
+        }
+        return result;
       }
-      return result;
+      return -1;
     }
-    return -1;
-  }
 
   private synchronized void blockSeekTo(long target) throws IOException {
     //
@@ -117,7 +117,7 @@ class S3InputStream extends FSInputStream {
     }
     if (targetBlock < 0) {
       throw new IOException(
-          "Impossible situation: could not find target position " + target);
+                            "Impossible situation: could not find target position " + target);
     }
     long offsetIntoBlock = target - targetBlockStart;
 
@@ -132,7 +132,7 @@ class S3InputStream extends FSInputStream {
   }
 
   @Override
-  public void close() throws IOException {
+    public void close() throws IOException {
     if (closed) {
       throw new IOException("Stream closed");
     }
@@ -151,17 +151,17 @@ class S3InputStream extends FSInputStream {
    * We don't support marks.
    */
   @Override
-  public boolean markSupported() {
+    public boolean markSupported() {
     return false;
   }
 
   @Override
-  public void mark(int readLimit) {
+    public void mark(int readLimit) {
     // Do nothing
   }
 
   @Override
-  public void reset() throws IOException {
+    public void reset() throws IOException {
     throw new IOException("Mark not supported");
   }
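
The S3InputStream hunks re-indent the whole read path without changing it: the stream tracks pos, lazily opens the block containing pos once it runs past blockEnd, and then reads from the per-block stream. A stripped-down, in-memory sketch of that bookkeeping (this is not the S3 implementation, only the pos/blockEnd pattern visible above):

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;

// Minimal sketch of the pos/blockEnd bookkeeping, over in-memory blocks;
// not the S3 stream itself.
class BlockedInput {
  private final byte[][] blocks;
  private long fileLength = 0;
  private long pos = 0;            // absolute read position in the logical file
  private long blockEnd = -1;      // inclusive end offset of the block currently open
  private InputStream blockStream; // stream over the block that contains pos

  BlockedInput(byte[][] blocks) {
    this.blocks = blocks;
    for (byte[] b : blocks) {
      fileLength += b.length;
    }
  }

  synchronized int read() throws IOException {
    int result = -1;
    if (pos < fileLength) {
      if (pos > blockEnd) {
        blockSeekTo(pos);          // lazily open the block that contains pos
      }
      result = blockStream.read();
      if (result >= 0) {
        pos++;
      }
    }
    return result;
  }

  private void blockSeekTo(long target) throws IOException {
    long start = 0;
    for (byte[] block : blocks) {
      long end = start + block.length - 1;
      if (target >= start && target <= end) {
        blockStream = new ByteArrayInputStream(block);
        blockStream.skip(target - start);   // position within the block
        blockEnd = end;
        return;
      }
      start = end + 1;
    }
    throw new IOException("Could not find target position " + target);
  }
}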
 

+ 51 - 51
src/java/org/apache/hadoop/fs/s3/S3OutputStream.java

@@ -46,7 +46,7 @@ class S3OutputStream extends OutputStream {
   private Block nextBlock;
 
   public S3OutputStream(Configuration conf, FileSystemStore store,
-      Path path, long blockSize, Progressable progress) throws IOException {
+                        Path path, long blockSize, Progressable progress) throws IOException {
     
     this.conf = conf;
     this.store = store;
@@ -74,52 +74,52 @@ class S3OutputStream extends OutputStream {
   }
 
   @Override
-  public synchronized void write(int b) throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
+    public synchronized void write(int b) throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
 
-    if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
-      flush();
+      if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
+        flush();
+      }
+      outBuf[pos++] = (byte) b;
+      filePos++;
     }
-    outBuf[pos++] = (byte) b;
-    filePos++;
-  }
 
   @Override
-  public synchronized void write(byte b[], int off, int len) throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
-    while (len > 0) {
-      int remaining = bufferSize - pos;
-      int toWrite = Math.min(remaining, len);
-      System.arraycopy(b, off, outBuf, pos, toWrite);
-      pos += toWrite;
-      off += toWrite;
-      len -= toWrite;
-      filePos += toWrite;
-
-      if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
-        flush();
+    public synchronized void write(byte b[], int off, int len) throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
+      while (len > 0) {
+        int remaining = bufferSize - pos;
+        int toWrite = Math.min(remaining, len);
+        System.arraycopy(b, off, outBuf, pos, toWrite);
+        pos += toWrite;
+        off += toWrite;
+        len -= toWrite;
+        filePos += toWrite;
+
+        if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
+          flush();
+        }
       }
     }
-  }
 
   @Override
-  public synchronized void flush() throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
+    public synchronized void flush() throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
 
-    if (bytesWrittenToBlock + pos >= blockSize) {
-      flushData((int) blockSize - bytesWrittenToBlock);
-    }
-    if (bytesWrittenToBlock == blockSize) {
-      endBlock();
+      if (bytesWrittenToBlock + pos >= blockSize) {
+        flushData((int) blockSize - bytesWrittenToBlock);
+      }
+      if (bytesWrittenToBlock == blockSize) {
+        endBlock();
+      }
+      flushData(pos);
     }
-    flushData(pos);
-  }
 
   private synchronized void flushData(int maxPos) throws IOException {
     int workingPos = Math.min(pos, maxPos);
@@ -174,27 +174,27 @@ class S3OutputStream extends OutputStream {
 
   private synchronized void internalClose() throws IOException {
     INode inode = new INode(FileType.FILE, blocks.toArray(new Block[blocks
-        .size()]));
+                                                                    .size()]));
     store.storeINode(path, inode);
   }
 
   @Override
-  public synchronized void close() throws IOException {
-    if (closed) {
-      throw new IOException("Stream closed");
-    }
+    public synchronized void close() throws IOException {
+      if (closed) {
+        throw new IOException("Stream closed");
+      }
 
-    flush();
-    if (filePos == 0 || bytesWrittenToBlock != 0) {
-      endBlock();
-    }
+      flush();
+      if (filePos == 0 || bytesWrittenToBlock != 0) {
+        endBlock();
+      }
 
-    backupStream.close();
-    backupFile.delete();
+      backupStream.close();
+      backupFile.delete();
 
-    super.close();
+      super.close();
 
-    closed = true;
-  }
+      closed = true;
+    }
 
 }
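
Likewise for the write path: bytes are buffered in outBuf and flushed whenever the buffer fills or the running total reaches the block size, at which point the block is closed. A small in-memory sketch of that boundary arithmetic, with the actual block upload replaced by a comment:

import java.io.ByteArrayOutputStream;
import java.io.IOException;

// Sketch of the buffer/block-boundary logic shown above, kept in memory for
// simplicity; not the S3 output stream itself.
class BlockedOutput {
  private final long blockSize;
  private final byte[] outBuf;                // write buffer, same role as outBuf above
  private int pos = 0;                        // bytes currently sitting in the buffer
  private long bytesWrittenToBlock = 0;       // bytes already flushed into the open block
  private final ByteArrayOutputStream currentBlock = new ByteArrayOutputStream();

  BlockedOutput(long blockSize, int bufferSize) {
    this.blockSize = blockSize;
    this.outBuf = new byte[bufferSize];
  }

  synchronized void write(int b) throws IOException {
    // Same trigger as the hunk: flush when the block or the buffer is full.
    if ((bytesWrittenToBlock + pos == blockSize) || (pos >= outBuf.length)) {
      flush();
    }
    outBuf[pos++] = (byte) b;
  }

  synchronized void flush() throws IOException {
    if (bytesWrittenToBlock + pos >= blockSize) {
      flushData((int) (blockSize - bytesWrittenToBlock));  // top up the current block
    }
    if (bytesWrittenToBlock == blockSize) {
      endBlock();                                          // block is full, close it
    }
    flushData(pos);                                        // push whatever remains
  }

  private void flushData(int maxPos) {
    int workingPos = Math.min(pos, maxPos);
    if (workingPos > 0) {
      currentBlock.write(outBuf, 0, workingPos);
      bytesWrittenToBlock += workingPos;
      // Shift any unflushed bytes to the front of the buffer.
      System.arraycopy(outBuf, workingPos, outBuf, 0, pos - workingPos);
      pos -= workingPos;
    }
  }

  private void endBlock() {
    // A real store would upload currentBlock here; the sketch just starts a new one.
    currentBlock.reset();
    bytesWrittenToBlock = 0;
  }
}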

+ 1 - 1
src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -169,7 +169,7 @@ public class ObjectWritable implements Writable, Configurable {
   /** Read a {@link Writable}, {@link String}, primitive type, or an array of
    * the preceding. */
   @SuppressWarnings("unchecked")
-  public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
+    public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
     throws IOException {
     String className = UTF8.readString(in);
     Class<?> declaredClass = PRIMITIVE_NAMES.get(className);

+ 1 - 1
src/java/org/apache/hadoop/mapred/ClusterStatus.java

@@ -20,7 +20,7 @@ public class ClusterStatus implements Writable {
        new WritableFactory() {
          public Writable newInstance() { return new ClusterStatus(); }
        });
-    }
+  }
 
   private int task_trackers;
   private int map_tasks;

+ 13 - 13
src/java/org/apache/hadoop/mapred/DefaultJobHistoryParser.java

@@ -20,7 +20,7 @@ public class DefaultJobHistoryParser {
    * @throws IOException
    */
   public static Map<String, Map<String, JobHistory.JobInfo>> parseMasterIndex(File historyFile)
-      throws IOException {
+    throws IOException {
     MasterIndexParseListener parser = new MasterIndexParseListener();
     JobHistory.parseHistory(historyFile, parser);
 
@@ -34,16 +34,16 @@ public class DefaultJobHistoryParser {
    * @throws IOException
    */
   public static void parseJobTasks(File jobHistoryFile, JobHistory.JobInfo job)
-      throws IOException {
+    throws IOException {
     JobHistory.parseHistory(jobHistoryFile, 
-        new JobTasksParseListener(job));
+                            new JobTasksParseListener(job));
   }
-/**
- * Listener for Job's history log file, it populates JobHistory.JobInfo 
- * object with data from log file. 
- */
+  /**
+   * Listener for Job's history log file, it populates JobHistory.JobInfo 
+   * object with data from log file. 
+   */
   static class JobTasksParseListener
-      implements JobHistory.Listener {
+    implements JobHistory.Listener {
     JobHistory.JobInfo job;
 
     JobTasksParseListener(JobHistory.JobInfo job) {
@@ -61,7 +61,7 @@ public class DefaultJobHistoryParser {
     }
 
     private JobHistory.MapAttempt getMapAttempt(
-        String jobid, String jobTrackerId, String taskId, String taskAttemptId) {
+                                                String jobid, String jobTrackerId, String taskId, String taskAttemptId) {
 
       JobHistory.Task task = getTask(taskId);
       JobHistory.MapAttempt mapAttempt = 
@@ -75,7 +75,7 @@ public class DefaultJobHistoryParser {
     }
 
     private JobHistory.ReduceAttempt getReduceAttempt(
-        String jobid, String jobTrackerId, String taskId, String taskAttemptId) {
+                                                      String jobid, String jobTrackerId, String taskId, String taskAttemptId) {
 
       JobHistory.Task task = getTask(taskId);
       JobHistory.ReduceAttempt reduceAttempt = 
@@ -90,7 +90,7 @@ public class DefaultJobHistoryParser {
 
     // JobHistory.Listener implementation 
     public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
-        throws IOException {
+      throws IOException {
       String jobTrackerId = values.get(JobHistory.Keys.JOBTRACKERID);
       String jobid = values.get(Keys.JOBID);
       
@@ -119,7 +119,7 @@ public class DefaultJobHistoryParser {
    * 
    */
   static class MasterIndexParseListener
-      implements JobHistory.Listener {
+    implements JobHistory.Listener {
     Map<String, Map<String, JobHistory.JobInfo>> jobTrackerToJobs = new TreeMap<String, Map<String, JobHistory.JobInfo>>();
 
     Map<String, JobHistory.JobInfo> activeJobs = null;
@@ -128,7 +128,7 @@ public class DefaultJobHistoryParser {
     // Implement JobHistory.Listener
 
     public void handle(JobHistory.RecordTypes recType, Map<Keys, String> values)
-        throws IOException {
+      throws IOException {
  
       if (recType.equals(JobHistory.RecordTypes.Jobtracker)) {
         activeJobs = new TreeMap<String, JobHistory.JobInfo>();
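
Here the implements clause and wrapped parameters move to the new continuation style; the listener logic is untouched. For illustration only, a tiny hypothetical listener written in the same shape (HistoryListener below is an assumed stand-in, not the JobHistory.Listener interface):

import java.io.IOException;
import java.util.Map;

// Hypothetical listener written in the continuation style applied above.
class CountingParseListener
  implements HistoryListener {
  private int records = 0;

  public void handle(String recType, Map<String, String> values)
    throws IOException {
    records++;
  }

  int getRecordCount() {
    return records;
  }
}

// Assumed stand-in for a history listener interface; not the Hadoop one.
interface HistoryListener {
  void handle(String recType, Map<String, String> values) throws IOException;
}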

+ 143 - 143
src/java/org/apache/hadoop/mapred/JobHistory.java

@@ -60,10 +60,10 @@ public class JobHistory {
    * It acts as a global namespace for all keys. 
    */
   public static enum Keys { JOBTRACKERID,
-    START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF,SUBMIT_TIME, LAUNCH_TIME, 
-    TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES,
-    JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, 
-    COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED 
+                            START_TIME, FINISH_TIME, JOBID, JOBNAME, USER, JOBCONF,SUBMIT_TIME, LAUNCH_TIME, 
+                            TOTAL_MAPS, TOTAL_REDUCES, FAILED_MAPS, FAILED_REDUCES, FINISHED_MAPS, FINISHED_REDUCES,
+                            JOB_STATUS, TASKID, HOSTNAME, TASK_TYPE, ERROR, TASK_ATTEMPT_ID, TASK_STATUS, 
+                            COPY_PHASE, SORT_PHASE, REDUCE_PHASE, SHUFFLE_FINISHED, SORT_FINISHED 
   };
   /**
    * This enum contains some of the values commonly used by history log events. 
@@ -94,7 +94,7 @@ public class JobHistory {
         }
         masterIndex = 
           new PrintWriter(
-              new FileOutputStream(new File( LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE), true )) ;
+                          new FileOutputStream(new File( LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE), true )) ;
         // add jobtracker id = tracker start time
         log(masterIndex, RecordTypes.Jobtracker, Keys.START_TIME, JOBTRACKER_START_TIME);  
       }catch(IOException e){
@@ -114,17 +114,17 @@ public class JobHistory {
    * @throws IOException
    */
   public static void parseHistory(File path, Listener l) throws IOException{
-      BufferedReader reader = new BufferedReader(new FileReader(path));
-      String line = null ; 
-      StringBuffer buf = new StringBuffer(); 
-      while ((line = reader.readLine())!= null){
-        buf.append(line); 
-        if( ! line.trim().endsWith("\"")){
-          continue ; 
-        }
-        parseLine(buf.toString(), l );
-        buf = new StringBuffer(); 
+    BufferedReader reader = new BufferedReader(new FileReader(path));
+    String line = null ; 
+    StringBuffer buf = new StringBuffer(); 
+    while ((line = reader.readLine())!= null){
+      buf.append(line); 
+      if( ! line.trim().endsWith("\"")){
+        continue ; 
       }
+      parseLine(buf.toString(), l );
+      buf = new StringBuffer(); 
+    }
   }
   /**
    * Parse a single line of history. 
@@ -305,13 +305,13 @@ public class JobHistory {
      * @param jobConf path to job conf xml file in HDFS. 
      */
     public static void logSubmitted(String jobId, String jobName, String user, 
-        long submitTime, String jobConf){
+                                    long submitTime, String jobConf){
       
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job, 
-              new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
-              new String[]{jobId, jobName, user, String.valueOf(submitTime),jobConf });
+                         new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
+                         new String[]{jobId, jobName, user, String.valueOf(submitTime),jobConf });
         }
         // setup the history log file for this job
         String logFileName =  JOBTRACKER_START_TIME + "_" + jobId ; 
@@ -322,8 +322,8 @@ public class JobHistory {
           openJobs.put(logFileName, writer);
           // add to writer as well 
           JobHistory.log(writer, RecordTypes.Job, 
-              new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
-              new String[]{jobId, jobName, user, String.valueOf(submitTime) ,jobConf}); 
+                         new Enum[]{Keys.JOBID, Keys.JOBNAME, Keys.USER, Keys.SUBMIT_TIME, Keys.JOBCONF }, 
+                         new String[]{jobId, jobName, user, String.valueOf(submitTime) ,jobConf}); 
              
         }catch(IOException e){
           LOG.error("Failed creating job history log file, disabling history", e);
@@ -342,9 +342,9 @@ public class JobHistory {
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job, 
-              new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
-              new String[] {jobId,  String.valueOf(startTime), 
-                String.valueOf(totalMaps), String.valueOf(totalReduces) } ) ; 
+                         new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME, Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
+                         new String[] {jobId,  String.valueOf(startTime), 
+                                       String.valueOf(totalMaps), String.valueOf(totalReduces) } ) ; 
         }
         
         String logFileName =  JOBTRACKER_START_TIME + "_" + jobId ; 
@@ -352,8 +352,8 @@ public class JobHistory {
         
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Job, 
-              new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME,Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
-              new String[] {jobId,  String.valueOf(startTime), String.valueOf(totalMaps), String.valueOf(totalReduces)} ) ; 
+                         new Enum[] {Keys.JOBID, Keys.LAUNCH_TIME,Keys.TOTAL_MAPS, Keys.TOTAL_REDUCES },
+                         new String[] {jobId,  String.valueOf(startTime), String.valueOf(totalMaps), String.valueOf(totalReduces)} ) ; 
         }
       }
     }
@@ -367,13 +367,13 @@ public class JobHistory {
      * @param failedReduces no of failed reduce tasks. 
      */ 
     public static void logFinished(String jobId, long finishTime, int finishedMaps, int finishedReduces,
-        int failedMaps, int failedReduces){
+                                   int failedMaps, int failedReduces){
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job,          
-              new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
-              new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
-                String.valueOf(finishedMaps), String.valueOf(finishedReduces) } ) ;
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
+                                       String.valueOf(finishedMaps), String.valueOf(finishedReduces) } ) ;
         }
         
         // close job file for this job
@@ -381,11 +381,11 @@ public class JobHistory {
         PrintWriter writer = openJobs.get(logFileName); 
         if( null != writer){
           JobHistory.log(writer, RecordTypes.Job,          
-              new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
-              Keys.FAILED_MAPS, Keys.FAILED_REDUCES},
-              new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
-                String.valueOf(finishedMaps), String.valueOf(finishedReduces),
-                String.valueOf(failedMaps), String.valueOf(failedReduces)} ) ;
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES,
+                                     Keys.FAILED_MAPS, Keys.FAILED_REDUCES},
+                         new String[] {jobId,  "" + finishTime, Values.SUCCESS.name(), 
+                                       String.valueOf(finishedMaps), String.valueOf(finishedReduces),
+                                       String.valueOf(failedMaps), String.valueOf(failedReduces)} ) ;
           writer.close();
           openJobs.remove(logFileName); 
         }
@@ -404,20 +404,20 @@ public class JobHistory {
       if( ! disableHistory ){
         synchronized(MASTER_INDEX_LOG_FILE){
           JobHistory.log(masterIndex, RecordTypes.Job,
-              new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
-              new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
-                String.valueOf(finishedReduces)} ) ; 
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS, Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
+                                       String.valueOf(finishedReduces)} ) ; 
+        }
+        String logFileName =  JOBTRACKER_START_TIME + "_" + jobid ; 
+        PrintWriter writer = (PrintWriter)openJobs.get(logFileName); 
+        if( null != writer){
+          JobHistory.log(writer, RecordTypes.Job,
+                         new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
+                         new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
+                                       String.valueOf(finishedReduces)} ) ; 
+          writer.close();
+          openJobs.remove(logFileName); 
         }
-          String logFileName =  JOBTRACKER_START_TIME + "_" + jobid ; 
-          PrintWriter writer = (PrintWriter)openJobs.get(logFileName); 
-          if( null != writer){
-            JobHistory.log(writer, RecordTypes.Job,
-                new Enum[] {Keys.JOBID, Keys.FINISH_TIME, Keys.JOB_STATUS,Keys.FINISHED_MAPS, Keys.FINISHED_REDUCES },
-                new String[] {jobid,  String.valueOf(timestamp), Values.FAILED.name(), String.valueOf(finishedMaps), 
-                  String.valueOf(finishedReduces)} ) ; 
-            writer.close();
-            openJobs.remove(logFileName); 
-          }
       }
     }
   }
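
Every logging call in this class goes through JobHistory.log(writer, recordType, keys, values) with parallel key and value arrays; the body of that method is not part of this diff. Purely to make the parallel-array convention concrete, a hypothetical minimal logger is sketched below; the KEY="value" output format and the names are assumptions, not the actual history-file format.

import java.io.PrintWriter;

class ParallelArrayLogSketch {
  // Illustrative keys only; the real enum is much larger.
  enum Key { JOBID, SUBMIT_TIME, JOB_STATUS }

  // Write one record: the record type followed by KEY="value" pairs.
  // The format here is an assumption, not Hadoop's history format.
  static void log(PrintWriter out, String recordType, Enum[] keys, String[] values) {
    StringBuilder line = new StringBuilder(recordType);
    int n = Math.min(keys.length, values.length);   // arrays are expected to be parallel
    for (int i = 0; i < n; i++) {
      line.append(' ').append(keys[i].name())
          .append("=\"").append(values[i]).append('"');
    }
    out.println(line.toString());
  }

  public static void main(String[] args) {
    PrintWriter out = new PrintWriter(System.out, true);
    log(out, "Job",
        new Enum[] { Key.JOBID, Key.SUBMIT_TIME, Key.JOB_STATUS },
        new String[] { "job_0001", "1176400000000", "SUCCESS" });
  }
}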
@@ -437,12 +437,12 @@ public class JobHistory {
      * @param startTime startTime of tip. 
      */
     public static void logStarted(String jobId, String taskId, String taskType, 
-         long startTime){
+                                  long startTime){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE , Keys.START_TIME}, 
-              new String[]{taskId, taskType, String.valueOf(startTime)}) ;
+                         new String[]{taskId, taskType, String.valueOf(startTime)}) ;
         }
       }
     }
@@ -454,13 +454,13 @@ public class JobHistory {
      * @param finishTime finish timeof task in ms
      */
     public static void logFinished(String jobId, String taskId, String taskType, 
-        long finishTime){
+                                   long finishTime){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE, 
-              Keys.TASK_STATUS, Keys.FINISH_TIME}, 
-              new String[]{ taskId,taskType, Values.SUCCESS.name(), String.valueOf(finishTime)}) ;
+                                                              Keys.TASK_STATUS, Keys.FINISH_TIME}, 
+                         new String[]{ taskId,taskType, Values.SUCCESS.name(), String.valueOf(finishTime)}) ;
         }
       }
     }
@@ -477,8 +477,8 @@ public class JobHistory {
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId); 
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.Task, new Enum[]{Keys.TASKID, Keys.TASK_TYPE, 
-              Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR}, 
-              new String[]{ taskId,  taskType, Values.FAILED.name(), String.valueOf(time) , error}) ;
+                                                              Keys.TASK_STATUS, Keys.FINISH_TIME, Keys.ERROR}, 
+                         new String[]{ taskId,  taskType, Values.FAILED.name(), String.valueOf(time) , error}) ;
         }
       }
     }
@@ -500,43 +500,43 @@ public class JobHistory {
    * a Map Attempt on a node.
    */
   public static class MapAttempt extends TaskAttempt{
-   /**
-    * Log start time of this map task attempt. 
-    * @param jobId job id
-    * @param taskId task id
-    * @param taskAttemptId task attempt id
-    * @param startTime start time of task attempt as reported by task tracker. 
-    * @param hostName host name of the task attempt. 
-    */
-   public static void logStarted(String jobId, String taskId,String taskAttemptId, long startTime, String hostName){
-     if( ! disableHistory ){
-       PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
-       if( null != writer ){
-         JobHistory.log( writer, RecordTypes.MapAttempt, 
-             new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, 
-               Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
-             new String[]{Values.MAP.name(),  taskId, 
-                taskAttemptId, String.valueOf(startTime), hostName} ) ; 
-       }
+    /**
+     * Log start time of this map task attempt. 
+     * @param jobId job id
+     * @param taskId task id
+     * @param taskAttemptId task attempt id
+     * @param startTime start time of task attempt as reported by task tracker. 
+     * @param hostName host name of the task attempt. 
+     */
+    public static void logStarted(String jobId, String taskId,String taskAttemptId, long startTime, String hostName){
+      if( ! disableHistory ){
+        PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
+        if( null != writer ){
+          JobHistory.log( writer, RecordTypes.MapAttempt, 
+                          new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, 
+                                      Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
+                          new String[]{Values.MAP.name(),  taskId, 
+                                       taskAttemptId, String.valueOf(startTime), hostName} ) ; 
+        }
       }
     }
-   /**
-    * Log finish time of map task attempt. 
-    * @param jobId job id
-    * @param taskId task id
-    * @param taskAttemptId task attempt id 
-    * @param finishTime finish time
-    * @param hostName host name 
-    */
+    /**
+     * Log finish time of map task attempt. 
+     * @param jobId job id
+     * @param taskId task id
+     * @param taskAttemptId task attempt id 
+     * @param finishTime finish time
+     * @param hostName host name 
+     */
     public static void logFinished(String jobId, String taskId, String taskAttemptId, long finishTime, String hostName){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if( null != writer ){
           JobHistory.log(writer, RecordTypes.MapAttempt, 
-              new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-              Keys.FINISH_TIME, Keys.HOSTNAME},
-              new String[]{Values.MAP.name(), taskId, taskAttemptId, Values.SUCCESS.name(),  
-              String.valueOf(finishTime), hostName} ) ; 
+                         new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                     Keys.FINISH_TIME, Keys.HOSTNAME},
+                         new String[]{Values.MAP.name(), taskId, taskAttemptId, Values.SUCCESS.name(),  
+                                      String.valueOf(finishTime), hostName} ) ; 
         }
       }
     }
@@ -550,15 +550,15 @@ public class JobHistory {
      * @param error error message if any for this task attempt. 
      */
     public static void logFailed(String jobId, String taskId, String taskAttemptId, 
-        long timestamp, String hostName, String error){
+                                 long timestamp, String hostName, String error){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if( null != writer ){
           JobHistory.log( writer, RecordTypes.MapAttempt, 
-              new Enum[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-                Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
-              new String[]{ Values.MAP.name(), taskId, taskAttemptId, Values.FAILED.name(),
-                String.valueOf(timestamp), hostName, error} ) ; 
+                          new Enum[]{Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                     Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR},
+                          new String[]{ Values.MAP.name(), taskId, taskAttemptId, Values.FAILED.name(),
+                                        String.valueOf(timestamp), hostName, error} ) ; 
         }
       }
     } 
@@ -577,18 +577,18 @@ public class JobHistory {
      * @param hostName host name 
      */
     public static void logStarted(String jobId, String taskId, String taskAttemptId, 
-        long startTime, String hostName){
+                                  long startTime, String hostName){
       if( ! disableHistory ){
         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
         if( null != writer ){
           JobHistory.log( writer, RecordTypes.ReduceAttempt, 
-              new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, 
-                Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
-              new String[]{Values.REDUCE.name(),  taskId, 
-                taskAttemptId, String.valueOf(startTime), hostName} ) ; 
+                          new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, 
+                                       Keys.TASK_ATTEMPT_ID, Keys.START_TIME, Keys.HOSTNAME},
+                          new String[]{Values.REDUCE.name(),  taskId, 
+                                       taskAttemptId, String.valueOf(startTime), hostName} ) ; 
         }
       }
-     }
+    }
     /**
      * Log finished event of this task. 
      * @param jobId job id
@@ -599,42 +599,42 @@ public class JobHistory {
      * @param finishTime finish time of task
      * @param hostName host name where task attempt executed
      */
-     public static void logFinished(String jobId, String taskId, String taskAttemptId, 
-        long shuffleFinished, long sortFinished, long finishTime, String hostName){
-       if( ! disableHistory ){
-         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
-         if( null != writer ){
-           JobHistory.log( writer, RecordTypes.ReduceAttempt, 
-               new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
-               Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, Keys.FINISH_TIME, Keys.HOSTNAME},
-               new String[]{Values.REDUCE.name(),  taskId, taskAttemptId, Values.SUCCESS.name(), 
-               String.valueOf(shuffleFinished), String.valueOf(sortFinished),
-               String.valueOf(finishTime), hostName} ) ; 
-         }
-       }
-     }
-     /**
-      * Log failed reduce task attempt. 
-      * @param jobId job id 
-      * @param taskId task id
-      * @param taskAttemptId task attempt id
-      * @param timestamp time stamp when task failed
-      * @param hostName host name of the task attempt.  
-      * @param error error message of the task. 
-      */
-     public static void logFailed(String jobId, String taskId,String taskAttemptId, long timestamp, 
-          String hostName, String error){
-       if( ! disableHistory ){
-         PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
-         if( null != writer ){
-           JobHistory.log( writer, RecordTypes.ReduceAttempt, 
-               new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,Keys.TASK_STATUS, 
-                 Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR },
-               new String[]{ Values.REDUCE.name(), taskId, taskAttemptId, Values.FAILED.name(), 
-               String.valueOf(timestamp), hostName, error } ) ; 
-         }
-       }
-     }
+    public static void logFinished(String jobId, String taskId, String taskAttemptId, 
+                                   long shuffleFinished, long sortFinished, long finishTime, String hostName){
+      if( ! disableHistory ){
+        PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
+        if( null != writer ){
+          JobHistory.log( writer, RecordTypes.ReduceAttempt, 
+                          new Enum[]{ Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID, Keys.TASK_STATUS, 
+                                      Keys.SHUFFLE_FINISHED, Keys.SORT_FINISHED, Keys.FINISH_TIME, Keys.HOSTNAME},
+                          new String[]{Values.REDUCE.name(),  taskId, taskAttemptId, Values.SUCCESS.name(), 
+                                       String.valueOf(shuffleFinished), String.valueOf(sortFinished),
+                                       String.valueOf(finishTime), hostName} ) ; 
+        }
+      }
+    }
+    /**
+     * Log failed reduce task attempt. 
+     * @param jobId job id 
+     * @param taskId task id
+     * @param taskAttemptId task attempt id
+     * @param timestamp time stamp when task failed
+     * @param hostName host name of the task attempt.  
+     * @param error error message of the task. 
+     */
+    public static void logFailed(String jobId, String taskId,String taskAttemptId, long timestamp, 
+                                 String hostName, String error){
+      if( ! disableHistory ){
+        PrintWriter writer = (PrintWriter)openJobs.get(JOBTRACKER_START_TIME + "_" + jobId);
+        if( null != writer ){
+          JobHistory.log( writer, RecordTypes.ReduceAttempt, 
+                          new Enum[]{  Keys.TASK_TYPE, Keys.TASKID, Keys.TASK_ATTEMPT_ID,Keys.TASK_STATUS, 
+                                       Keys.FINISH_TIME, Keys.HOSTNAME, Keys.ERROR },
+                          new String[]{ Values.REDUCE.name(), taskId, taskAttemptId, Values.FAILED.name(), 
+                                        String.valueOf(timestamp), hostName, error } ) ; 
+        }
+      }
+    }
   }
   /**
    * Callback interface for reading back log events from JobHistory. This interface 
@@ -677,12 +677,12 @@ public class JobHistory {
       if( lastRan ==0 || (now - lastRan) < ONE_DAY_IN_MS ){
         return ; 
       }
-       lastRan = now;  
-       isRunning = true ; 
-        // update master Index first
-        try{
+      lastRan = now;  
+      isRunning = true ; 
+      // update master Index first
+      try{
         File logFile = new File(
-            LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE); 
+                                LOG_DIR + File.separator + MASTER_INDEX_LOG_FILE); 
         
         synchronized(MASTER_INDEX_LOG_FILE){
           Map<String, Map<String, JobHistory.JobInfo>> jobTrackersToJobs = 
@@ -728,14 +728,14 @@ public class JobHistory {
       }
       
       File[] oldFiles = new File(LOG_DIR).listFiles(new FileFilter(){
-        public boolean accept(File file){
-          // delete if older than 30 days
-          if( now - file.lastModified() > THIRTY_DAYS_IN_MS ){
-            return true ; 
-          }
+          public boolean accept(File file){
+            // delete if older than 30 days
+            if( now - file.lastModified() > THIRTY_DAYS_IN_MS ){
+              return true ; 
+            }
             return false; 
-        }
-      });
+          }
+        });
       for( File f : oldFiles){
         f.delete(); 
         LOG.info("Deleting old history file : " + f.getName());

+ 3 - 3
src/java/org/apache/hadoop/mapred/LineRecordReader.java

@@ -45,7 +45,7 @@ public class LineRecordReader implements RecordReader {
   private TextStuffer bridge = new TextStuffer();
 
   public LineRecordReader(Configuration job, FileSplit split)
-      throws IOException {
+    throws IOException {
     long start = split.getStart();
     long end = start + split.getLength();
     final Path file = split.getPath();
@@ -77,7 +77,7 @@ public class LineRecordReader implements RecordReader {
     this.start = offset;
     this.pos = offset;
     this.end = endOffset;    
-//    readLine(in, null); 
+    //    readLine(in, null); 
   }
   
   public WritableComparable createKey() {
@@ -111,7 +111,7 @@ public class LineRecordReader implements RecordReader {
   }
 
   public static long readLine(InputStream in, 
-      OutputStream out) throws IOException {
+                              OutputStream out) throws IOException {
     long bytes = 0;
     while (true) {
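
Only the throws clause, a wrapped parameter list, and a commented-out call change in this file; the loop body of readLine is not shown in the diff. As a rough illustration of what such a helper does, a hedged sketch that copies one line from the input to the output and reports the bytes consumed (not the exact Hadoop implementation):

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

class ReadLineSketch {
  // Copy one line from in to out; return the number of bytes consumed (0 at EOF).
  // This is an illustrative sketch, not the Hadoop readLine implementation.
  public static long readLine(InputStream in,
                              OutputStream out) throws IOException {
    long bytes = 0;
    while (true) {
      int b = in.read();
      if (b == -1) {
        break;                      // end of stream
      }
      bytes++;
      if (b == '\n') {
        break;                      // line ending consumed, stop here
      }
      if (b != '\r' && out != null) {
        out.write(b);               // pass everything except CR/LF through
      }
    }
    return bytes;
  }

  public static void main(String[] args) throws IOException {
    InputStream in = new ByteArrayInputStream("hello\nworld\n".getBytes("UTF-8"));
    ByteArrayOutputStream line = new ByteArrayOutputStream();
    readLine(in, line);
    System.out.println(line.toString("UTF-8"));   // prints "hello"
  }
}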
       

+ 46 - 46
src/java/org/apache/hadoop/mapred/PhasedFileSystem.java

@@ -44,7 +44,7 @@ public class PhasedFileSystem extends FilterFileSystem {
    * @param taskid taskId
    */
   public PhasedFileSystem(FileSystem fs, String jobid, 
-      String tipid, String taskid) {
+                          String tipid, String taskid) {
     super(fs); 
     this.jobid = jobid; 
     this.tipid = tipid ; 
@@ -73,7 +73,7 @@ public class PhasedFileSystem extends FilterFileSystem {
     if( finalNameToFileInfo.containsKey(finalFile) ){
       if( !overwrite ){
         throw new IOException("Error, file already exists : " + 
-            finalFile.toString()); 
+                              finalFile.toString()); 
       }else{
         // delete tempp file and let create a new one. 
         FileInfo fInfo = finalNameToFileInfo.get(finalFile); 
@@ -100,14 +100,14 @@ public class PhasedFileSystem extends FilterFileSystem {
   }
   
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-          short replication, long blockSize,Progressable progress)
-      throws IOException {
+                                   short replication, long blockSize,Progressable progress)
+    throws IOException {
     if( fs.exists(f) && !overwrite ){
       throw new IOException("Error creating file - already exists : " + f); 
     }
     FSDataOutputStream stream = 
       fs.create(setupFile(f, overwrite), overwrite, bufferSize, replication, 
-          blockSize, progress);
+                blockSize, progress);
     finalNameToFileInfo.get(f).setOpenFileStream(stream); 
     return stream ; 
   }
@@ -128,7 +128,7 @@ public class PhasedFileSystem extends FilterFileSystem {
     FileInfo fInfo = finalNameToFileInfo.get(fPath) ; 
     if( null == fInfo ){
       throw new IOException("Error committing file! File was not created " + 
-          "with PhasedFileSystem : " + fPath); 
+                            "with PhasedFileSystem : " + fPath); 
     }
     try{
       fInfo.getOpenFileStream().close();
@@ -156,7 +156,7 @@ public class PhasedFileSystem extends FilterFileSystem {
         }catch(IOException ioe){
           // rename failed, log error and delete temp files
           LOG.error("PhasedFileSystem failed to commit file : " + fPath 
-              + " error : " + ioe.getMessage()); 
+                    + " error : " + ioe.getMessage()); 
           fs.delete(fInfo.getTempPath());
         }
       }else{
@@ -225,81 +225,81 @@ public class PhasedFileSystem extends FilterFileSystem {
   }
   
   @Override
-  public boolean setReplication(
-      Path src, short replication)
-      throws IOException {
+    public boolean setReplication(
+                                  Path src, short replication)
+    throws IOException {
     // throw IOException for interface compatibility with 
     // base class. 
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public boolean rename(
-      Path src, Path dst)
-      throws IOException {
+    public boolean rename(
+                          Path src, Path dst)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public boolean delete(
-      Path f)
-      throws IOException {
+    public boolean delete(
+                          Path f)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   /** @deprecated */ @Deprecated
-  @Override
-  public void lock(
-      Path f, boolean shared)
-      throws IOException {
+    @Override
+    public void lock(
+                     Path f, boolean shared)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   /** @deprecated */ @Deprecated
-  @Override
-  public void release(
-      Path f)
-      throws IOException {
+    @Override
+    public void release(
+                        Path f)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public void copyFromLocalFile(
-      boolean delSrc, Path src, Path dst)
-      throws IOException {
+    public void copyFromLocalFile(
+                                  boolean delSrc, Path src, Path dst)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public void copyToLocalFile(
-      boolean delSrc, Path src, Path dst)
-      throws IOException {
+    public void copyToLocalFile(
+                                boolean delSrc, Path src, Path dst)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public Path startLocalOutput(
-      Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    public Path startLocalOutput(
+                                 Path fsOutputFile, Path tmpLocalFile)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
- }
+  }
 
   @Override
-  public void completeLocalOutput(
-      Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    public void completeLocalOutput(
+                                    Path fsOutputFile, Path tmpLocalFile)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
- }
+  }
 
   @Override
-  public String[][] getFileCacheHints(
-      Path f, long start, long len)
-      throws IOException {
+    public String[][] getFileCacheHints(
+                                        Path f, long start, long len)
+    throws IOException {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
   @Override
-  public String getName() {
+    public String getName() {
     throw new UnsupportedOperationException("Operation not supported");  
   }
 
@@ -318,28 +318,28 @@ public class PhasedFileSystem extends FilterFileSystem {
       return openFileStream;
     }
     public void setOpenFileStream(
-        OutputStream openFileStream) {
+                                  OutputStream openFileStream) {
       this.openFileStream = openFileStream;
     }
     public Path getFinalPath() {
       return finalPath;
     }
     public void setFinalPath(
-        Path finalPath) {
+                             Path finalPath) {
       this.finalPath = finalPath;
     }
     public boolean isOverwrite() {
       return overwrite;
     }
     public void setOverwrite(
-        boolean overwrite) {
+                             boolean overwrite) {
       this.overwrite = overwrite;
     }
     public Path getTempPath() {
       return tempPath;
     }
     public void setTempPath(
-        Path tempPath) {
+                            Path tempPath) {
       this.tempPath = tempPath;
     }
   }
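
Two things are visible in the PhasedFileSystem hunks: unsupported operations now wrap their throws clause onto its own line, and the commit path closes the temporary stream, renames the temp file to its final path, and deletes the temp file if the rename fails. A standalone sketch of that write-to-temp-then-promote idea using plain java.io, independent of the FileSystem API:

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Plain java.io sketch of a phased/two-step write; not the FileSystem API.
class PhasedWriteSketch {
  private final File tempFile;
  private final File finalFile;
  private final OutputStream out;

  PhasedWriteSketch(File finalFile) throws IOException {
    this.finalFile = finalFile;
    this.tempFile = new File(finalFile.getParentFile(),
                             finalFile.getName() + ".tmp");
    this.out = new FileOutputStream(tempFile);
  }

  OutputStream stream() {
    return out;
  }

  // Close the open stream and promote the temp file to its final name.
  void commit() throws IOException {
    out.close();
    if (finalFile.exists() && !finalFile.delete()) {
      throw new IOException("Could not replace existing file " + finalFile);
    }
    if (!tempFile.renameTo(finalFile)) {
      // Rename failed: clean up the temp file rather than leaving it behind,
      // mirroring the error handling visible in the hunk above.
      tempFile.delete();
      throw new IOException("Failed to commit " + finalFile);
    }
  }

  // Discard the temp file without publishing anything.
  void abort() throws IOException {
    out.close();
    tempFile.delete();
  }
}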

+ 127 - 127
src/java/org/apache/hadoop/mapred/TaskCompletionEvent.java

@@ -12,137 +12,137 @@ import org.apache.hadoop.io.WritableUtils;
  *
  */
 public class TaskCompletionEvent implements Writable{
-    static public enum Status {FAILED, SUCCEEDED, OBSOLETE};
+  static public enum Status {FAILED, SUCCEEDED, OBSOLETE};
     
-    private int eventId ; 
-    private String taskTrackerHttp ;
-    private String taskId ;
-    Status status ; 
-    boolean isMap = false ;
-    private int idWithinJob;
-    public static final TaskCompletionEvent[] EMPTY_ARRAY = 
-        new TaskCompletionEvent[0];
-    /**
-     * Default constructor for Writable.
-     *
-     */
-    public TaskCompletionEvent(){}
-    /**
-     * Constructor. eventId should be created externally and incremented
-     * per event for each job. 
-     * @param eventId event id, event id should be unique and assigned in
-     *  incrementally, starting from 0. 
-     * @param taskId task id
-     * @param status task's status 
-     * @param taskTrackerHttp task tracker's host:port for http. 
-     */
-    public TaskCompletionEvent(int eventId, 
-        String taskId,
-        int idWithinJob,
-        boolean isMap,
-        Status status, 
-        String taskTrackerHttp){
+  private int eventId ; 
+  private String taskTrackerHttp ;
+  private String taskId ;
+  Status status ; 
+  boolean isMap = false ;
+  private int idWithinJob;
+  public static final TaskCompletionEvent[] EMPTY_ARRAY = 
+    new TaskCompletionEvent[0];
+  /**
+   * Default constructor for Writable.
+   *
+   */
+  public TaskCompletionEvent(){}
+  /**
+   * Constructor. eventId should be created externally and incremented
+   * per event for each job. 
+   * @param eventId event id, event id should be unique and assigned in
+   *  incrementally, starting from 0. 
+   * @param taskId task id
+   * @param status task's status 
+   * @param taskTrackerHttp task tracker's host:port for http. 
+   */
+  public TaskCompletionEvent(int eventId, 
+                             String taskId,
+                             int idWithinJob,
+                             boolean isMap,
+                             Status status, 
+                             String taskTrackerHttp){
       
-        this.taskId = taskId ;
-        this.idWithinJob = idWithinJob ;
-        this.isMap = isMap ;
-        this.eventId = eventId ; 
-        this.status =status ; 
-        this.taskTrackerHttp = taskTrackerHttp ;
-    }
-    /**
-     * Returns event Id. 
-     * @return event id
-     */
-    public int getEventId() {
-        return eventId;
-    }
-    /**
-     * Returns task id. 
-     * @return task id
-     */
-    public String getTaskId() {
-        return taskId;
-    }
-    /**
-     * Returns enum Status.SUCESS or Status.FAILURE.
-     * @return task tracker status
-     */
-    public Status getTaskStatus() {
-        return status;
-    }
-    /**
-     * http location of the tasktracker where this task ran. 
-     * @return http location of tasktracker user logs
-     */
-    public String getTaskTrackerHttp() {
-        return taskTrackerHttp;
-    }
-    /**
-     * set event Id. should be assigned incrementally starting from 0. 
-     * @param eventId
-     */
-    public void setEventId(
-        int eventId) {
-        this.eventId = eventId;
-    }
-    /**
-     * Sets task id. 
-     * @param taskId
-     */
-    public void setTaskId(
-        String taskId) {
-        this.taskId = taskId;
-    }
-    /**
-     * Set task status. 
-     * @param status
-     */
-    public void setTaskStatus(
-        Status status) {
-        this.status = status;
-    }
-    /**
-     * Set task tracker http location. 
-     * @param taskTrackerHttp
-     */
-    public void setTaskTrackerHttp(
-        String taskTrackerHttp) {
-        this.taskTrackerHttp = taskTrackerHttp;
-    }
+    this.taskId = taskId ;
+    this.idWithinJob = idWithinJob ;
+    this.isMap = isMap ;
+    this.eventId = eventId ; 
+    this.status =status ; 
+    this.taskTrackerHttp = taskTrackerHttp ;
+  }
+  /**
+   * Returns event Id. 
+   * @return event id
+   */
+  public int getEventId() {
+    return eventId;
+  }
+  /**
+   * Returns task id. 
+   * @return task id
+   */
+  public String getTaskId() {
+    return taskId;
+  }
+  /**
+   * Returns the task's Status: SUCCEEDED, FAILED or OBSOLETE.
+   * @return task status
+   */
+  public Status getTaskStatus() {
+    return status;
+  }
+  /**
+   * http location of the tasktracker where this task ran. 
+   * @return http location of tasktracker user logs
+   */
+  public String getTaskTrackerHttp() {
+    return taskTrackerHttp;
+  }
+  /**
+   * Sets the event id; it should be assigned incrementally, starting from 0. 
+   * @param eventId
+   */
+  public void setEventId(
+                         int eventId) {
+    this.eventId = eventId;
+  }
+  /**
+   * Sets task id. 
+   * @param taskId
+   */
+  public void setTaskId(
+                        String taskId) {
+    this.taskId = taskId;
+  }
+  /**
+   * Set task status. 
+   * @param status
+   */
+  public void setTaskStatus(
+                            Status status) {
+    this.status = status;
+  }
+  /**
+   * Set task tracker http location. 
+   * @param taskTrackerHttp
+   */
+  public void setTaskTrackerHttp(
+                                 String taskTrackerHttp) {
+    this.taskTrackerHttp = taskTrackerHttp;
+  }
     
-    public String toString(){
-        StringBuffer buf = new StringBuffer(); 
-        buf.append("Task Id : "); 
-        buf.append( taskId ) ; 
-        buf.append(", Status : ");  
-        buf.append( status.name() ) ;
-        return buf.toString();
-    }
+  public String toString(){
+    StringBuffer buf = new StringBuffer(); 
+    buf.append("Task Id : "); 
+    buf.append( taskId ) ; 
+    buf.append(", Status : ");  
+    buf.append( status.name() ) ;
+    return buf.toString();
+  }
     
-    public boolean isMapTask() {
-        return isMap;
-    }
+  public boolean isMapTask() {
+    return isMap;
+  }
     
-    public int idWithinJob() {
-      return idWithinJob;
-    }
-    //////////////////////////////////////////////
-    // Writable
-    //////////////////////////////////////////////
-    public void write(DataOutput out) throws IOException {
-        WritableUtils.writeString(out, taskId); 
-        WritableUtils.writeVInt(out, idWithinJob);
-        out.writeBoolean(isMap);
-        WritableUtils.writeEnum(out, status); 
-        WritableUtils.writeString(out, taskTrackerHttp);
-    }
+  public int idWithinJob() {
+    return idWithinJob;
+  }
+  //////////////////////////////////////////////
+  // Writable
+  //////////////////////////////////////////////
+  public void write(DataOutput out) throws IOException {
+    WritableUtils.writeString(out, taskId); 
+    WritableUtils.writeVInt(out, idWithinJob);
+    out.writeBoolean(isMap);
+    WritableUtils.writeEnum(out, status); 
+    WritableUtils.writeString(out, taskTrackerHttp);
+  }
   
-    public void readFields(DataInput in) throws IOException {
-        this.taskId = WritableUtils.readString(in) ; 
-        this.idWithinJob = WritableUtils.readVInt(in);
-        this.isMap = in.readBoolean();
-        this.status = WritableUtils.readEnum(in, Status.class);
-        this.taskTrackerHttp = WritableUtils.readString(in);
-    }
+  public void readFields(DataInput in) throws IOException {
+    this.taskId = WritableUtils.readString(in) ; 
+    this.idWithinJob = WritableUtils.readVInt(in);
+    this.isMap = in.readBoolean();
+    this.status = WritableUtils.readEnum(in, Status.class);
+    this.taskTrackerHttp = WritableUtils.readString(in);
+  }
 }
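
The reformatted class above is a plain Writable bean. As a quick orientation aid (not part of the patch), here is a minimal sketch of building an event with the constructor shown in the hunk and round-tripping it through write()/readFields(); the task id and tracker URL are placeholder values.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.io.DataInputStream;
import java.io.DataOutputStream;

import org.apache.hadoop.mapred.TaskCompletionEvent;

public class TaskCompletionEventDemo {
  public static void main(String[] args) throws Exception {
    // Arguments follow the constructor in the diff:
    // eventId, taskId, idWithinJob, isMap, status, taskTrackerHttp.
    TaskCompletionEvent event = new TaskCompletionEvent(
      0, "task_0001_m_000000_0", 0, true,
      TaskCompletionEvent.Status.SUCCEEDED, "http://tracker-host:50060");

    // Serialize via the Writable contract ...
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    event.write(new DataOutputStream(bytes));

    // ... and rebuild through the default constructor plus readFields().
    TaskCompletionEvent copy = new TaskCompletionEvent();
    copy.readFields(
      new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())));

    System.out.println(copy);            // "Task Id : ..., Status : SUCCEEDED"
    System.out.println(copy.isMapTask());
  }
}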

+ 6 - 6
src/java/org/apache/hadoop/mapred/TaskLogAppender.java

@@ -24,13 +24,13 @@ public class TaskLogAppender extends AppenderSkeleton {
   public void activateOptions() {
     taskLogWriter = 
       new TaskLog.Writer(taskId, TaskLog.LogFilter.SYSLOG, 
-              noKeepSplits, totalLogFileSize, purgeLogSplits, logsRetainHours);
+                         noKeepSplits, totalLogFileSize, purgeLogSplits, logsRetainHours);
     try {
       taskLogWriter.init();
     } catch (IOException ioe) {
       taskLogWriter = null;
       errorHandler.error("Failed to initialize the task's logging " +
-              "infrastructure: " + StringUtils.stringifyException(ioe));
+                         "infrastructure: " + StringUtils.stringifyException(ioe));
     }
   }
   
@@ -42,7 +42,7 @@ public class TaskLogAppender extends AppenderSkeleton {
 
     if (this.layout == null) {
       errorHandler.error("No layout for appender " + name , 
-              null, ErrorCode.MISSING_LAYOUT );
+                         null, ErrorCode.MISSING_LAYOUT );
     }
     
     // Log the message to the task's log
@@ -51,8 +51,8 @@ public class TaskLogAppender extends AppenderSkeleton {
       taskLogWriter.write(logMessage.getBytes(), 0, logMessage.length());
     } catch (IOException ioe) {
       errorHandler.error("Failed to log: '" + logMessage + 
-              "' to the task's logging infrastructure with the exception: " + 
-              StringUtils.stringifyException(ioe));
+                         "' to the task's logging infrastructure with the exception: " + 
+                         StringUtils.stringifyException(ioe));
     }
   }
 
@@ -66,7 +66,7 @@ public class TaskLogAppender extends AppenderSkeleton {
         taskLogWriter.close();
       } catch (IOException ioe) {
         errorHandler.error("Failed to close the task's log with the exception: " 
-                + StringUtils.stringifyException(ioe));
+                           + StringUtils.stringifyException(ioe));
       }
     } else {
       errorHandler.error("Calling 'close' on uninitialize/closed logger");

+ 8 - 8
src/java/org/apache/hadoop/mapred/lib/MultithreadedMapRunner.java

@@ -36,7 +36,7 @@ import java.util.concurrent.TimeUnit;
  */
 public class MultithreadedMapRunner implements MapRunnable {
   private static final Log LOG =
-      LogFactory.getLog(MultithreadedMapRunner.class.getName());
+    LogFactory.getLog(MultithreadedMapRunner.class.getName());
 
   private JobConf job;
   private Mapper mapper;
@@ -45,10 +45,10 @@ public class MultithreadedMapRunner implements MapRunnable {
 
   public void configure(JobConf job) {
     int numberOfThreads =
-        job.getInt("mapred.map.multithreadedrunner.threads", 10);
+      job.getInt("mapred.map.multithreadedrunner.threads", 10);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Configuring job " + job.getJobName() +
-          " to use " + numberOfThreads + " threads" );
+                " to use " + numberOfThreads + " threads" );
     }
 
     this.job = job;
@@ -75,7 +75,7 @@ public class MultithreadedMapRunner implements MapRunnable {
         // If threads are not available from the thread-pool this method
         // will block until there is a thread available.
         executorService.execute(
-            new MapperInvokeRunable(key, value, output, reporter));
+                                new MapperInvokeRunable(key, value, output, reporter));
 
         // Checking if a Mapper.map within a Runnable has generated an
         // IOException. If so we rethrow it to force an abort of the Map
@@ -92,7 +92,7 @@ public class MultithreadedMapRunner implements MapRunnable {
 
       if (LOG.isDebugEnabled()) {
         LOG.debug("Finished dispatching all Mappper.map calls, job "
-            + job.getJobName());
+                  + job.getJobName());
       }
 
       // Graceful shutdown of the Threadpool, it will let all scheduled
@@ -105,7 +105,7 @@ public class MultithreadedMapRunner implements MapRunnable {
         while (!executorService.awaitTermination(100, TimeUnit.MILLISECONDS)) {
           if (LOG.isDebugEnabled()) {
             LOG.debug("Awaiting all running Mappper.map calls to finish, job "
-                + job.getJobName());
+                      + job.getJobName());
           }
 
           // Checking if a Mapper.map within a Runnable has generated an
@@ -141,7 +141,7 @@ public class MultithreadedMapRunner implements MapRunnable {
       }
 
     } finally {
-        mapper.close();
+      mapper.close();
     }
   }
 
@@ -165,7 +165,7 @@ public class MultithreadedMapRunner implements MapRunnable {
      * @param reporter
      */
     public MapperInvokeRunable(WritableComparable key, Writable value,
-        OutputCollector output, Reporter reporter) {
+                               OutputCollector output, Reporter reporter) {
       this.key = key;
       this.value = value;
       this.output = output;
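
The runner above is selected per job through configuration. A hedged sketch of the driver-side wiring follows, assuming the usual old-API JobConf.setMapRunnerClass hook; only the thread-count property name is taken verbatim from the configure() hunk.

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.MultithreadedMapRunner;

public class MultithreadedJobSetup {
  public static void enable(JobConf conf) {
    // Replace the default single-threaded MapRunner so each map task
    // dispatches Mapper.map calls to a thread pool.
    conf.setMapRunnerClass(MultithreadedMapRunner.class);

    // Pool size; configure() above defaults this to 10 when unset.
    conf.setInt("mapred.map.multithreadedrunner.threads", 20);
  }
}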

+ 5 - 5
src/java/org/apache/hadoop/record/compiler/CodeGenerator.java

@@ -28,7 +28,7 @@ import java.util.HashMap;
 abstract class CodeGenerator {
   
   private static HashMap<String, CodeGenerator> generators =
-      new HashMap<String, CodeGenerator>();
+    new HashMap<String, CodeGenerator>();
   
   static {
     register("c", new CGenerator());
@@ -45,8 +45,8 @@ abstract class CodeGenerator {
   }
   
   abstract void genCode(String file,
-      ArrayList<JFile> inclFiles,
-      ArrayList<JRecord> records,
-      String destDir,
-      ArrayList<String> options) throws IOException;
+                        ArrayList<JFile> inclFiles,
+                        ArrayList<JRecord> records,
+                        String destDir,
+                        ArrayList<String> options) throws IOException;
 }
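
For context, a minimal sketch of what a concrete generator behind the register() calls looks like; the class is hypothetical and assumed to sit in the same org.apache.hadoop.record.compiler package, matching the package-private signature above.

package org.apache.hadoop.record.compiler;

import java.io.IOException;
import java.util.ArrayList;

// Hypothetical generator: reports what it would emit instead of writing files.
class NoopGenerator extends CodeGenerator {
  void genCode(String file,
               ArrayList<JFile> inclFiles,
               ArrayList<JRecord> records,
               String destDir,
               ArrayList<String> options) throws IOException {
    System.out.println("would generate " + records.size()
                       + " record(s) from " + file + " into " + destDir);
  }
}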

+ 23 - 23
src/java/org/apache/hadoop/util/DiskChecker.java

@@ -10,34 +10,34 @@ import java.io.IOException;
 
 public class DiskChecker {
 
-    public static class DiskErrorException extends IOException {
-      public DiskErrorException(String msg) {
-        super(msg);
-      }
+  public static class DiskErrorException extends IOException {
+    public DiskErrorException(String msg) {
+      super(msg);
     }
+  }
     
-    public static class DiskOutOfSpaceException extends IOException {
-        public DiskOutOfSpaceException(String msg) {
-          super(msg);
-        }
-      }
+  public static class DiskOutOfSpaceException extends IOException {
+    public DiskOutOfSpaceException(String msg) {
+      super(msg);
+    }
+  }
       
-    public static void checkDir( File dir ) throws DiskErrorException {
-        if( !dir.exists() && !dir.mkdirs() )
-            throw new DiskErrorException( "can not create directory: " 
-                    + dir.toString() );
+  public static void checkDir( File dir ) throws DiskErrorException {
+    if( !dir.exists() && !dir.mkdirs() )
+      throw new DiskErrorException( "can not create directory: " 
+                                    + dir.toString() );
         
-        if ( !dir.isDirectory() )
-            throw new DiskErrorException( "not a directory: " 
-                    + dir.toString() );
+    if ( !dir.isDirectory() )
+      throw new DiskErrorException( "not a directory: " 
+                                    + dir.toString() );
             
-        if( !dir.canRead() )
-            throw new DiskErrorException( "directory is not readable: " 
-                    + dir.toString() );
+    if( !dir.canRead() )
+      throw new DiskErrorException( "directory is not readable: " 
+                                    + dir.toString() );
             
-        if( !dir.canWrite() )
-            throw new DiskErrorException( "directory is not writable: " 
-                    + dir.toString() );
-    }
+    if( !dir.canWrite() )
+      throw new DiskErrorException( "directory is not writable: " 
+                                    + dir.toString() );
+  }
 
 }
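
Typical callers invoke the static helper above before trusting a local directory; a short usage sketch with a placeholder path.

import java.io.File;

import org.apache.hadoop.util.DiskChecker;
import org.apache.hadoop.util.DiskChecker.DiskErrorException;

public class LocalDirCheck {
  public static void main(String[] args) {
    try {
      // Creates the directory if missing, then verifies it is a readable,
      // writable directory; any failure surfaces as DiskErrorException.
      DiskChecker.checkDir(new File("/tmp/hadoop-local"));
      System.out.println("local dir is usable");
    } catch (DiskErrorException e) {
      System.err.println("bad local dir: " + e.getMessage());
    }
  }
}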

+ 1 - 1
src/java/org/apache/hadoop/util/HostsFileReader.java

@@ -44,7 +44,7 @@ public class HostsFileReader {
     excludes.clear();
     
     if (!includesFile.equals("")) {
-        readFileToSet(includesFile, includes);
+      readFileToSet(includesFile, includes);
     }
     if (!excludesFile.equals("")) {
       readFileToSet(excludesFile, excludes);

+ 5 - 5
src/java/org/apache/hadoop/util/Progressable.java

@@ -9,8 +9,8 @@ import java.io.IOException;
  * @author Owen O'Malley
  */
 public interface Progressable {
-    /** callback for reporting progress. Used by DFSclient to report
-     * progress while writing a block of DFS file.
-     */
-    public void progress() throws IOException;
-}
+  /** callback for reporting progress. Used by DFSclient to report
+   * progress while writing a block of DFS file.
+   */
+  public void progress() throws IOException;
+}
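
Progressable is a single-method callback; below is a trivial implementation sketch that a writer such as DFSClient could invoke while streaming a block (the counting logic is illustrative only).

import java.io.IOException;

import org.apache.hadoop.util.Progressable;

public class LoggingProgress implements Progressable {
  private long calls = 0;

  // Called by the block writer to signal that the operation is still alive.
  public void progress() throws IOException {
    calls++;
    if (calls % 100 == 0) {
      System.out.println("progress reported " + calls + " times");
    }
  }
}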

+ 60 - 60
src/test/org/apache/hadoop/dfs/TestReplicationPolicy.java

@@ -17,17 +17,17 @@ public class TestReplicationPolicy extends TestCase {
   private static NameNode namenode;
   private static FSNamesystem.ReplicationTargetChooser replicator;
   private static DatanodeDescriptor dataNodes[] = 
-         new DatanodeDescriptor[] {
-    new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
-    new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
-    new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d2/r3"),
-    new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3")
- };
+    new DatanodeDescriptor[] {
+      new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
+      new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
+      new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d2/r3"),
+      new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3")
+    };
    
-private final static DatanodeDescriptor NODE = 
-  new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r4");
+  private final static DatanodeDescriptor NODE = 
+    new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r4");
   
   static {
     try {
@@ -47,8 +47,8 @@ private final static DatanodeDescriptor NODE =
     }
     for( int i=0; i<NUM_OF_DATANODES; i++) {
       dataNodes[i].updateHeartbeat(
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
     }
   }
   
@@ -62,34 +62,34 @@ private final static DatanodeDescriptor NODE =
    */
   public void testChooseTarget1() throws Exception {
     dataNodes[0].updateHeartbeat(
-              2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-              FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 4); // overloaded
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], null, BLOCK_SIZE);
+                                      0, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], null, BLOCK_SIZE);
+                                      1, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], null, BLOCK_SIZE);
+                                      2, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], null, BLOCK_SIZE);
+                                      3, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[2]));
     
     targets = replicator.chooseTarget(
-        4, dataNodes[0], null, BLOCK_SIZE);
+                                      4, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]));
@@ -97,8 +97,8 @@ private final static DatanodeDescriptor NODE =
     assertFalse(cluster.isOnSameRack(targets[0], targets[3]));
 
     dataNodes[0].updateHeartbeat(
-        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
   }
 
   /**
@@ -116,20 +116,20 @@ private final static DatanodeDescriptor NODE =
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        0, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      0, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        1, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      1, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[0]);
     
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        2, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      2, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -137,7 +137,7 @@ private final static DatanodeDescriptor NODE =
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        3, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      3, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[0]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
@@ -146,14 +146,14 @@ private final static DatanodeDescriptor NODE =
     excludedNodes = new ArrayList<DatanodeDescriptor>();
     excludedNodes.add(dataNodes[1]); 
     targets = replicator.chooseTarget(
-        4, dataNodes[0], excludedNodes, BLOCK_SIZE);
+                                      4, dataNodes[0], excludedNodes, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[0]);
     for(int i=1; i<4; i++) {
       assertFalse(cluster.isOnSameRack(targets[0], targets[i]));
     }
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
-        cluster.isOnSameRack(targets[2], targets[3]));
+               cluster.isOnSameRack(targets[2], targets[3]));
     assertFalse(cluster.isOnSameRack(targets[1], targets[3]));
   }
 
@@ -168,46 +168,46 @@ private final static DatanodeDescriptor NODE =
   public void testChooseTarget3() throws Exception {
     // make data node 0 to be not qualified to choose
     dataNodes[0].updateHeartbeat(
-        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-        (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0); // no space
         
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], null, BLOCK_SIZE);
+                                      0, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], null, BLOCK_SIZE);
+                                      1, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertEquals(targets[0], dataNodes[1]);
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], null, BLOCK_SIZE);
+                                      2, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertEquals(targets[0], dataNodes[1]);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], null, BLOCK_SIZE);
+                                      3, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertEquals(targets[0], dataNodes[1]);
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        4, dataNodes[0], null, BLOCK_SIZE);
+                                      4, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 4);
     assertEquals(targets[0], dataNodes[1]);
     for(int i=1; i<4; i++) {
       assertFalse(cluster.isOnSameRack(targets[0], targets[i]));
     }
     assertTrue(cluster.isOnSameRack(targets[1], targets[2]) ||
-        cluster.isOnSameRack(targets[2], targets[3]));
+               cluster.isOnSameRack(targets[2], targets[3]));
     assertFalse(cluster.isOnSameRack(targets[1], targets[3]));
 
     dataNodes[0].updateHeartbeat(
-        2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-        FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
+                                 2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                 FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0); 
   }
   
   /**
@@ -222,40 +222,40 @@ private final static DatanodeDescriptor NODE =
     // make data node 0 & 1 to be not qualified to choose: not enough disk space
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-          (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                   (FSConstants.MIN_BLOCKS_FOR_WRITE-1)*BLOCK_SIZE, 0);
     }
       
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], null, BLOCK_SIZE);
+                                      0, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], null, BLOCK_SIZE);
+                                      1, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], null, BLOCK_SIZE);
+                                      2, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], dataNodes[0]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], null, BLOCK_SIZE);
+                                      3, dataNodes[0], null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     for(int i=0; i<3; i++) {
       assertFalse(cluster.isOnSameRack(targets[i], dataNodes[0]));
     }
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]) ||
-        cluster.isOnSameRack(targets[1], targets[2]));
+               cluster.isOnSameRack(targets[1], targets[2]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[2]));
     
     for(int i=0; i<2; i++) {
       dataNodes[i].updateHeartbeat(
-          2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
-          FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
+                                   2*FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 
+                                   FSConstants.MIN_BLOCKS_FOR_WRITE*BLOCK_SIZE, 0);
     }
   }
   /**
@@ -268,20 +268,20 @@ private final static DatanodeDescriptor NODE =
   public void testChooseTarget5() throws Exception {
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, NODE, null, BLOCK_SIZE);
+                                      0, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, NODE, null, BLOCK_SIZE);
+                                      1, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     
     targets = replicator.chooseTarget(
-        2, NODE, null, BLOCK_SIZE);
+                                      2, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(targets[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, NODE, null, BLOCK_SIZE);
+                                      3, NODE, null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(targets[0], targets[1]));
     assertFalse(cluster.isOnSameRack(targets[0], targets[2]));    
@@ -300,22 +300,22 @@ private final static DatanodeDescriptor NODE =
     DatanodeDescriptor[] targets;
     
     targets = replicator.chooseTarget(
-        0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
     
     targets = replicator.chooseTarget(
-        3, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      3, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 3);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
@@ -336,16 +336,16 @@ private final static DatanodeDescriptor NODE =
 
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));
@@ -365,16 +365,16 @@ private final static DatanodeDescriptor NODE =
     
     DatanodeDescriptor[] targets;
     targets = replicator.chooseTarget(
-        0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      0, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 0);
     
     targets = replicator.chooseTarget(
-        1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      1, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 1);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     
     targets = replicator.chooseTarget(
-        2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
+                                      2, dataNodes[0], choosenNodes, null, BLOCK_SIZE);
     assertEquals(targets.length, 2);
     assertTrue(cluster.isOnSameRack(dataNodes[0], targets[0]));
     assertFalse(cluster.isOnSameRack(dataNodes[0], targets[1]));

+ 1 - 1
src/test/org/apache/hadoop/fs/s3/Jets3tS3FileSystemTest.java

@@ -5,7 +5,7 @@ import java.io.IOException;
 public class Jets3tS3FileSystemTest extends S3FileSystemBaseTest {
 
   @Override
-  public FileSystemStore getFileSystemStore() throws IOException {
+    public FileSystemStore getFileSystemStore() throws IOException {
     return null; // use default store
   }
 

+ 11 - 11
src/test/org/apache/hadoop/fs/s3/S3FileSystemBaseTest.java

@@ -21,7 +21,7 @@ public abstract class S3FileSystemBaseTest extends TestCase {
   abstract FileSystemStore getFileSystemStore() throws IOException;
 
   @Override
-  protected void setUp() throws IOException {
+    protected void setUp() throws IOException {
     Configuration conf = new Configuration();
     
     s3FileSystem = new S3FileSystem(getFileSystemStore());
@@ -34,7 +34,7 @@ public abstract class S3FileSystemBaseTest extends TestCase {
   }
 
   @Override
-  protected void tearDown() throws Exception {
+    protected void tearDown() throws Exception {
     s3FileSystem.purge();
     s3FileSystem.close();
   }
@@ -83,7 +83,7 @@ public abstract class S3FileSystemBaseTest extends TestCase {
 
   public void testListPathsRaw() throws Exception {
     Path[] testDirs = { new Path("/test/hadoop/a"), new Path("/test/hadoop/b"),
-        new Path("/test/hadoop/c/1"), };
+                        new Path("/test/hadoop/c/1"), };
     assertNull(s3FileSystem.listPaths(testDirs[0]));
 
     for (Path path : testDirs) {
@@ -136,8 +136,8 @@ public abstract class S3FileSystemBaseTest extends TestCase {
     s3FileSystem.mkdirs(path.getParent());
 
     FSDataOutputStream out = s3FileSystem.create(path, false,
-            s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
-            (short) 1, BLOCK_SIZE);
+                                                 s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
+                                                 (short) 1, BLOCK_SIZE);
     out.write(data, 0, len);
     out.close();
 
@@ -175,16 +175,16 @@ public abstract class S3FileSystemBaseTest extends TestCase {
     
     try {
       s3FileSystem.create(path, false,
-              s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
-              (short) 1, 128);
+                          s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
+                          (short) 1, 128);
       fail("Should throw IOException.");
     } catch (IOException e) {
       // Expected
     }
     
     FSDataOutputStream out = s3FileSystem.create(path, true,
-            s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
-            (short) 1, BLOCK_SIZE);
+                                                 s3FileSystem.getConf().getInt("io.file.buffer.size", 4096), 
+                                                 (short) 1, BLOCK_SIZE);
     out.write(data, 0, BLOCK_SIZE / 2);
     out.close();
     
@@ -328,8 +328,8 @@ public abstract class S3FileSystemBaseTest extends TestCase {
 
   private void createEmptyFile(Path path) throws IOException {
     FSDataOutputStream out = s3FileSystem.create(path, false,
-            s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
-            (short) 1, BLOCK_SIZE);
+                                                 s3FileSystem.getConf().getInt("io.file.buffer.size", 4096),
+                                                 (short) 1, BLOCK_SIZE);
     out.write(data, 0, BLOCK_SIZE);
     out.close();
   }

+ 1 - 1
src/test/org/apache/hadoop/fs/s3/TestINode.java

@@ -23,7 +23,7 @@ public class TestINode extends TestCase {
     assertEquals("Length", 1, deserializedBlocks.length);
     assertEquals("Id", blocks[0].getId(), deserializedBlocks[0].getId());
     assertEquals("Length", blocks[0].getLength(), deserializedBlocks[0]
-        .getLength());
+                 .getLength());
 
   }
   

+ 1 - 1
src/test/org/apache/hadoop/fs/s3/TestInMemoryS3FileSystem.java

@@ -8,7 +8,7 @@ import org.apache.hadoop.conf.Configuration;
 public class TestInMemoryS3FileSystem extends S3FileSystemBaseTest {
 
   @Override
-  public FileSystemStore getFileSystemStore() throws IOException {
+    public FileSystemStore getFileSystemStore() throws IOException {
     return new InMemoryFileSystemStore();
   }
   

+ 10 - 10
src/test/org/apache/hadoop/io/retry/TestRetryProxy.java

@@ -22,7 +22,7 @@ public class TestRetryProxy extends TestCase {
   private UnreliableImplementation unreliableImpl;
   
   @Override
-  protected void setUp() throws Exception {
+    protected void setUp() throws Exception {
     unreliableImpl = new UnreliableImplementation();
   }
 
@@ -53,7 +53,7 @@ public class TestRetryProxy extends TestCase {
   
   public void testRetryForever() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl, RETRY_FOREVER);
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     unreliable.failsTenTimesThenSucceeds();
@@ -61,8 +61,8 @@ public class TestRetryProxy extends TestCase {
   
   public void testRetryUpToMaximumCountWithFixedSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.NANOSECONDS));
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     try {
@@ -75,8 +75,8 @@ public class TestRetryProxy extends TestCase {
   
   public void testRetryUpToMaximumTimeWithFixedSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryUpToMaximumTimeWithFixedSleep(80, 10, TimeUnit.NANOSECONDS));
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     try {
@@ -89,8 +89,8 @@ public class TestRetryProxy extends TestCase {
   
   public void testRetryUpToMaximumCountWithProportionalSleep() throws UnreliableException {
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryUpToMaximumCountWithProportionalSleep(8, 1, TimeUnit.NANOSECONDS));
     unreliable.alwaysSucceeds();
     unreliable.failsOnceThenSucceeds();
     try {
@@ -106,8 +106,8 @@ public class TestRetryProxy extends TestCase {
       Collections.<Class<? extends Exception>, RetryPolicy>singletonMap(FatalException.class, TRY_ONCE_THEN_FAIL);
     
     UnreliableInterface unreliable = (UnreliableInterface)
-    RetryProxy.create(UnreliableInterface.class, unreliableImpl,
-        retryByException(RETRY_FOREVER, exceptionToPolicyMap));
+      RetryProxy.create(UnreliableInterface.class, unreliableImpl,
+                        retryByException(RETRY_FOREVER, exceptionToPolicyMap));
     unreliable.failsOnceThenSucceeds();
     try {
       unreliable.alwaysfailsWithFatalException();
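
Beyond the test fixture above, RetryProxy.create wraps any interface; a hedged sketch follows with a hypothetical BlockFetcher interface, using the same retryUpToMaximumCountWithFixedSleep policy exercised by the test (assumed to come from RetryPolicies, as in the test's static imports).

import java.io.IOException;
import java.util.concurrent.TimeUnit;

import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryingClientSketch {
  // Hypothetical flaky service interface.
  public interface BlockFetcher {
    byte[] fetch(String blockId) throws IOException;
  }

  public static BlockFetcher wrap(BlockFetcher raw) {
    // Retry each call up to 8 times, sleeping 1 second between attempts.
    return (BlockFetcher) RetryProxy.create(
      BlockFetcher.class, raw,
      RetryPolicies.retryUpToMaximumCountWithFixedSleep(8, 1, TimeUnit.SECONDS));
  }
}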

+ 7 - 7
src/test/org/apache/hadoop/net/TestNetworkTopology.java

@@ -8,13 +8,13 @@ import junit.framework.TestCase;
 public class TestNetworkTopology extends TestCase {
   private final static NetworkTopology cluster = new NetworkTopology();
   private final static DatanodeDescriptor dataNodes[] = new DatanodeDescriptor[] {
-      new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
-      new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d1/r2"),
-      new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3"),
-      new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r3")
+    new DatanodeDescriptor(new DatanodeID("h1:5020", "0", -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h2:5020", "0", -1), "/d1/r1"),
+    new DatanodeDescriptor(new DatanodeID("h3:5020", "0", -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h4:5020", "0", -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h5:5020", "0", -1), "/d1/r2"),
+    new DatanodeDescriptor(new DatanodeID("h6:5020", "0", -1), "/d2/r3"),
+    new DatanodeDescriptor(new DatanodeID("h7:5020", "0", -1), "/d2/r3")
   };
   private final static DatanodeDescriptor NODE = 
     new DatanodeDescriptor(new DatanodeID("h8:5020", "0", -1), "/d2/r4");

+ 59 - 59
src/test/org/apache/hadoop/util/TestReflectionUtils.java

@@ -8,37 +8,37 @@ import junit.framework.TestCase;
 
 public class TestReflectionUtils extends TestCase {
 
-    private static Class toConstruct[] = { String.class, TestReflectionUtils.class, HashMap.class };
-    private Throwable failure = null;
+  private static Class toConstruct[] = { String.class, TestReflectionUtils.class, HashMap.class };
+  private Throwable failure = null;
 
-    public void setUp() {
-      ReflectionUtils.clearCache();
-    }
+  public void setUp() {
+    ReflectionUtils.clearCache();
+  }
     
-    public void testCache() throws Exception {
-      assertEquals(0, cacheSize());
-      doTestCache();
-      assertEquals(toConstruct.length, cacheSize());
-      ReflectionUtils.clearCache();
-      assertEquals(0, cacheSize());
-    }
+  public void testCache() throws Exception {
+    assertEquals(0, cacheSize());
+    doTestCache();
+    assertEquals(toConstruct.length, cacheSize());
+    ReflectionUtils.clearCache();
+    assertEquals(0, cacheSize());
+  }
     
     
-    private void doTestCache() {
-      for (int i=0; i<toConstruct.length; i++) {
-          Class cl = toConstruct[i];
-          Object x = ReflectionUtils.newInstance(cl, null);
-          Object y = ReflectionUtils.newInstance(cl, null);
-          assertEquals(cl, x.getClass());
-          assertEquals(cl, y.getClass());
-      }
+  private void doTestCache() {
+    for (int i=0; i<toConstruct.length; i++) {
+      Class cl = toConstruct[i];
+      Object x = ReflectionUtils.newInstance(cl, null);
+      Object y = ReflectionUtils.newInstance(cl, null);
+      assertEquals(cl, x.getClass());
+      assertEquals(cl, y.getClass());
     }
+  }
     
-    public void testThreadSafe() throws Exception {
-      Thread[] th = new Thread[32];
-      for (int i=0; i<th.length; i++) {
-          th[i] = new Thread() {
-            public void run() {
+  public void testThreadSafe() throws Exception {
+    Thread[] th = new Thread[32];
+    for (int i=0; i<th.length; i++) {
+      th[i] = new Thread() {
+          public void run() {
             try {
               doTestCache();
             } catch (Throwable t) {
@@ -46,46 +46,46 @@ public class TestReflectionUtils extends TestCase {
             }
           }
         };
-        th[i].start();
-      }
-      for (int i=0; i<th.length; i++) {
-        th[i].join();
-      }
-      if (failure != null) {
-        failure.printStackTrace();
-        fail(failure.getMessage());
-      }
+      th[i].start();
     }
-    
-    private int cacheSize() throws Exception {
-      return ReflectionUtils.getCacheSize();
+    for (int i=0; i<th.length; i++) {
+      th[i].join();
     }
-    
-    public void testCantCreate() {
-      try {
-        ReflectionUtils.newInstance(NoDefaultCtor.class, null);
-        fail("invalid call should fail");
-      } catch (RuntimeException rte) {
-        assertEquals(NoSuchMethodException.class, rte.getCause().getClass());
-      }
+    if (failure != null) {
+      failure.printStackTrace();
+      fail(failure.getMessage());
     }
+  }
     
-    public void testCacheDoesntLeak() throws Exception {
-      int iterations=9999; // very fast, but a bit less reliable - bigger numbers force GC
-      for (int i=0; i<iterations; i++) {
-        URLClassLoader loader = new URLClassLoader(new URL[0], getClass().getClassLoader());
-        Class cl = Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild", false, loader);
-        Object o = ReflectionUtils.newInstance(cl, null);
-        assertEquals(cl, o.getClass());
-      }
-      System.gc();
-      assertTrue(cacheSize()+" too big", cacheSize()<iterations);
-    }
+  private int cacheSize() throws Exception {
+    return ReflectionUtils.getCacheSize();
+  }
     
-    private static class LoadedInChild {
+  public void testCantCreate() {
+    try {
+      ReflectionUtils.newInstance(NoDefaultCtor.class, null);
+      fail("invalid call should fail");
+    } catch (RuntimeException rte) {
+      assertEquals(NoSuchMethodException.class, rte.getCause().getClass());
     }
+  }
     
-    public static class NoDefaultCtor {
-      public NoDefaultCtor(int x) {}
+  public void testCacheDoesntLeak() throws Exception {
+    int iterations=9999; // very fast, but a bit less reliable - bigger numbers force GC
+    for (int i=0; i<iterations; i++) {
+      URLClassLoader loader = new URLClassLoader(new URL[0], getClass().getClassLoader());
+      Class cl = Class.forName("org.apache.hadoop.util.TestReflectionUtils$LoadedInChild", false, loader);
+      Object o = ReflectionUtils.newInstance(cl, null);
+      assertEquals(cl, o.getClass());
     }
+    System.gc();
+    assertTrue(cacheSize()+" too big", cacheSize()<iterations);
+  }
+    
+  private static class LoadedInChild {
+  }
+    
+  public static class NoDefaultCtor {
+    public NoDefaultCtor(int x) {}
+  }
 }

+ 2 - 2
src/test/testjar/ExternalMapperReducer.java

@@ -26,7 +26,7 @@ public class ExternalMapperReducer
   }
 
   public void map(WritableComparable key, Writable value,
-    OutputCollector output, Reporter reporter)
+                  OutputCollector output, Reporter reporter)
     throws IOException {
     
     if (value instanceof Text) {
@@ -37,7 +37,7 @@ public class ExternalMapperReducer
   }
 
   public void reduce(WritableComparable key, Iterator values,
-    OutputCollector output, Reporter reporter)
+                     OutputCollector output, Reporter reporter)
     throws IOException {
     
     int count = 0;