
HADOOP-1148. More indentation and spacing fixes.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@530556 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting, 18 years ago
parent commit: 91c1614934
100 files changed with 3027 additions and 3025 deletions
  1. build.xml (+1 -1)
  2. src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java (+2 -2)
  3. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (+13 -13)
  4. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java (+27 -27)
  5. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (+17 -17)
  6. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java (+2 -2)
  7. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (+46 -46)
  8. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (+23 -23)
  9. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (+102 -102)
  10. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (+3 -3)
  11. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (+34 -34)
  12. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java (+3 -3)
  13. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (+59 -59)
  14. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (+14 -14)
  15. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (+9 -9)
  16. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (+7 -7)
  17. src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (+6 -6)
  18. src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java (+10 -10)
  19. src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (+27 -27)
  20. src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java (+1 -1)
  21. src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java (+1 -1)
  22. src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java (+3 -3)
  23. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java (+2 -2)
  24. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java (+1 -1)
  25. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java (+2 -2)
  26. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java (+39 -39)
  27. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java (+2 -2)
  28. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java (+1 -1)
  29. src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java (+6 -6)
  30. src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java (+4 -4)
  31. src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java (+3 -3)
  32. src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java (+1 -1)
  33. src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java (+2 -2)
  34. src/java/org/apache/hadoop/conf/Configuration.java (+13 -13)
  35. src/java/org/apache/hadoop/dfs/Block.java (+3 -3)
  36. src/java/org/apache/hadoop/dfs/BlockCommand.java (+70 -70)
  37. src/java/org/apache/hadoop/dfs/ClientProtocol.java (+14 -14)
  38. src/java/org/apache/hadoop/dfs/DFSClient.java (+37 -37)
  39. src/java/org/apache/hadoop/dfs/DFSFileInfo.java (+2 -2)
  40. src/java/org/apache/hadoop/dfs/DFSck.java (+3 -3)
  41. src/java/org/apache/hadoop/dfs/DataNode.java (+99 -99)
  42. src/java/org/apache/hadoop/dfs/DataStorage.java (+156 -156)
  43. src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java (+23 -23)
  44. src/java/org/apache/hadoop/dfs/DatanodeID.java (+7 -7)
  45. src/java/org/apache/hadoop/dfs/DatanodeInfo.java (+16 -16)
  46. src/java/org/apache/hadoop/dfs/DatanodeProtocol.java (+9 -9)
  47. src/java/org/apache/hadoop/dfs/DatanodeRegistration.java (+9 -9)
  48. src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java (+2 -2)
  49. src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (+16 -16)
  50. src/java/org/apache/hadoop/dfs/FSDataset.java (+45 -45)
  51. src/java/org/apache/hadoop/dfs/FSDirectory.java (+66 -66)
  52. src/java/org/apache/hadoop/dfs/FSEditLog.java (+77 -77)
  53. src/java/org/apache/hadoop/dfs/FSImage.java (+235 -235)
  54. src/java/org/apache/hadoop/dfs/FSNamesystem.java (+232 -230)
  55. src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java (+7 -7)
  56. src/java/org/apache/hadoop/dfs/IncorrectVersionException.java (+8 -8)
  57. src/java/org/apache/hadoop/dfs/JspHelper.java (+18 -18)
  58. src/java/org/apache/hadoop/dfs/NameNode.java (+86 -86)
  59. src/java/org/apache/hadoop/dfs/NamenodeFsck.java (+24 -24)
  60. src/java/org/apache/hadoop/dfs/NamespaceInfo.java (+7 -7)
  61. src/java/org/apache/hadoop/dfs/SafeModeException.java (+2 -2)
  62. src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (+3 -3)
  63. src/java/org/apache/hadoop/dfs/Storage.java (+159 -159)
  64. src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java (+4 -4)
  65. src/java/org/apache/hadoop/filecache/DistributedCache.java (+4 -4)
  66. src/java/org/apache/hadoop/fs/ChecksumFileSystem.java (+22 -22)
  67. src/java/org/apache/hadoop/fs/DF.java (+7 -7)
  68. src/java/org/apache/hadoop/fs/FSDataInputStream.java (+9 -9)
  69. src/java/org/apache/hadoop/fs/FSDataOutputStream.java (+2 -2)
  70. src/java/org/apache/hadoop/fs/FileSystem.java (+716 -716)
  71. src/java/org/apache/hadoop/fs/FileUtil.java (+21 -21)
  72. src/java/org/apache/hadoop/fs/FilterFileSystem.java (+11 -11)
  73. src/java/org/apache/hadoop/fs/FsShell.java (+54 -54)
  74. src/java/org/apache/hadoop/fs/InMemoryFileSystem.java (+2 -2)
  75. src/java/org/apache/hadoop/fs/LocalFileSystem.java (+4 -4)
  76. src/java/org/apache/hadoop/fs/Path.java (+2 -2)
  77. src/java/org/apache/hadoop/fs/RawLocalFileSystem.java (+11 -11)
  78. src/java/org/apache/hadoop/fs/s3/S3FileSystem.java (+21 -21)
  79. src/java/org/apache/hadoop/fs/s3/S3InputStream.java (+46 -46)
  80. src/java/org/apache/hadoop/fs/s3/S3OutputStream.java (+49 -49)
  81. src/java/org/apache/hadoop/io/BytesWritable.java (+1 -1)
  82. src/java/org/apache/hadoop/io/GenericWritable.java (+1 -1)
  83. src/java/org/apache/hadoop/io/MapFile.java (+17 -17)
  84. src/java/org/apache/hadoop/io/ObjectWritable.java (+5 -5)
  85. src/java/org/apache/hadoop/io/SequenceFile.java (+4 -4)
  86. src/java/org/apache/hadoop/io/Text.java (+2 -2)
  87. src/java/org/apache/hadoop/io/VersionedWritable.java (+1 -1)
  88. src/java/org/apache/hadoop/io/WritableComparator.java (+16 -16)
  89. src/java/org/apache/hadoop/io/WritableUtils.java (+6 -6)
  90. src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java (+2 -2)
  91. src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java (+2 -2)
  92. src/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java (+1 -1)
  93. src/java/org/apache/hadoop/io/compress/LzoCodec.java (+19 -19)
  94. src/java/org/apache/hadoop/io/compress/lzo/LzoCompressor.java (+3 -3)
  95. src/java/org/apache/hadoop/io/compress/lzo/LzoDecompressor.java (+4 -4)
  96. src/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java (+7 -7)
  97. src/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java (+4 -4)
  98. src/java/org/apache/hadoop/io/retry/RetryPolicies.java (+2 -2)
  99. src/java/org/apache/hadoop/ipc/Client.java (+13 -13)
  100. src/java/org/apache/hadoop/ipc/RPC.java (+11 -11)

+ 1 - 1
build.xml

@@ -482,7 +482,7 @@
   	
   	<checkstyle config="${test.src.dir}/checkstyle.xml"
   		failOnViolation="false">
-      <fileset dir="${src.dir}" includes="**/*.java"/>
+      <fileset dir="${src.dir}" includes="**/*.java" excludes="**/generated/**"/>
       <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/>
   	</checkstyle>
   	

+ 2 - 2
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java

@@ -80,7 +80,7 @@ import org.apache.hadoop.mapred.RunningJob;
 public class ValueAggregatorJob {
 
   public static JobControl createValueAggregatorJobs(String args[])
-      throws IOException {
+    throws IOException {
     JobControl theControl = new JobControl("ValueAggregatorJobs");
     ArrayList dependingJobs = new ArrayList();
     JobConf aJobConf = createValueAggregatorJob(args);
@@ -98,7 +98,7 @@ public class ValueAggregatorJob {
    * @throws IOException
    */
   public static JobConf createValueAggregatorJob(String args[])
-      throws IOException {
+    throws IOException {
 
     if (args.length < 2) {
       System.out.println("usage: inputDirs outDir [numOfReducer [textinputformat|seq [specfile [jobName]]]]");

+ 13 - 13
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java

@@ -60,17 +60,17 @@ public abstract class HAbstractScanner implements HScannerInterface {
       String column = col.toString();
       try {
         int colpos = column.indexOf(":") + 1;
-        if(colpos == 0) {
+        if (colpos == 0) {
           throw new IllegalArgumentException("Column name has no family indicator.");
         }
 
         String columnkey = column.substring(colpos);
 
-        if(columnkey == null || columnkey.length() == 0) {
+        if (columnkey == null || columnkey.length() == 0) {
           this.matchType = MATCH_TYPE.FAMILY_ONLY;
           this.family = column.substring(0, colpos);
 
-        } else if(isRegexPattern.matcher(columnkey).matches()) {
+        } else if (isRegexPattern.matcher(columnkey).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
           this.columnMatcher = Pattern.compile(column);
 
@@ -86,13 +86,13 @@ public abstract class HAbstractScanner implements HScannerInterface {
     // Matching method
     
     boolean matches(Text col) throws IOException {
-      if(this.matchType == MATCH_TYPE.SIMPLE) {
+      if (this.matchType == MATCH_TYPE.SIMPLE) {
         return col.equals(this.col);
         
-      } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
+      } else if (this.matchType == MATCH_TYPE.FAMILY_ONLY) {
         return col.toString().startsWith(this.family);
         
-      } else if(this.matchType == MATCH_TYPE.REGEX) {
+      } else if (this.matchType == MATCH_TYPE.REGEX) {
         return this.columnMatcher.matcher(col.toString()).matches();
         
       } else {
@@ -121,7 +121,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
     for(int i = 0; i < targetCols.length; i++) {
       Text family = HStoreKey.extractFamily(targetCols[i]);
       Vector<ColumnMatcher> matchers = okCols.get(family);
-      if(matchers == null) {
+      if (matchers == null) {
         matchers = new Vector<ColumnMatcher>();
       }
       matchers.add(new ColumnMatcher(targetCols[i]));
@@ -144,11 +144,11 @@ public abstract class HAbstractScanner implements HScannerInterface {
     Text column = keys[i].getColumn();
     Text family = HStoreKey.extractFamily(column);
     Vector<ColumnMatcher> matchers = okCols.get(family);
-    if(matchers == null) {
+    if (matchers == null) {
       return false;
     }
     for(int m = 0; m < matchers.size(); m++) {
-      if(matchers.get(m).matches(column)) {
+      if (matchers.get(m).matches(column)) {
        return true;
      }
    }
@@ -203,7 +203,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
    // Grab all the values that match this row/timestamp

    boolean insertedItem = false;
-    if(chosenRow != null) {
+    if (chosenRow != null) {
      key.setRow(chosenRow);
      key.setVersion(chosenTimestamp);
      key.setColumn(new Text(""));
@@ -215,7 +215,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
              && (keys[i].getRow().compareTo(chosenRow) == 0)
              && (keys[i].getTimestamp() == chosenTimestamp)) {

-          if(columnMatch(i)) {
+          if (columnMatch(i)) {
            outbuf.reset();
            vals[i].write(outbuf);
            byte byteresults[] = outbuf.getData();
@@ -226,7 +226,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
            insertedItem = true;
          }

-          if (! getNext(i)) {
+          if (!getNext(i)) {
            closeSubScanner(i);
          }
        }
@@ -237,7 +237,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
        while((keys[i] != null)
              && ((keys[i].getRow().compareTo(chosenRow) <= 0)
                  || (keys[i].getTimestamp() > this.timestamp)
-                  || (! columnMatch(i)))) {
+                  || (!columnMatch(i)))) {

          getNext(i);
        }

+ 27 - 27
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java

@@ -95,12 +95,12 @@ public class HClient extends HGlobals implements HConstants {
   }
 
   public synchronized void openTable(Text tableName) throws IOException {
-    if(closed) {
+    if (closed) {
       throw new IllegalStateException("client is not open");
     }
 
     tableServers = tablesToServers.get(tableName);
-    if(tableServers == null ) {                 // We don't know where the table is
+    if (tableServers == null) {                 // We don't know where the table is
       findTableInMeta(tableName);               // Load the information from meta
     }
   }
@@ -108,9 +108,9 @@ public class HClient extends HGlobals implements HConstants {
   private void findTableInMeta(Text tableName) throws IOException {
     TreeMap<Text, TableInfo> metaServers = tablesToServers.get(META_TABLE_NAME);
     
-    if(metaServers == null) {                   // Don't know where the meta is
+    if (metaServers == null) {                   // Don't know where the meta is
       loadMetaFromRoot(tableName);
-      if(tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
+      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
         // All we really wanted was the meta or root table
         return;
       }
@@ -119,7 +119,7 @@ public class HClient extends HGlobals implements HConstants {
 
     tableServers = new TreeMap<Text, TableInfo>();
     for(Iterator<TableInfo> i = metaServers.tailMap(tableName).values().iterator();
-        i.hasNext(); ) {
+        i.hasNext();) {
       
       TableInfo t = i.next();
       
@@ -133,7 +133,7 @@ public class HClient extends HGlobals implements HConstants {
    */
   private void loadMetaFromRoot(Text tableName) throws IOException {
     locateRootRegion();
-    if(tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
+    if (tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
       return;
     }
     scanRoot();
@@ -144,7 +144,7 @@ public class HClient extends HGlobals implements HConstants {
    * could be.
    */
   private void locateRootRegion() throws IOException {
-    if(master == null) {
+    if (master == null) {
       master = (HMasterInterface)RPC.getProxy(HMasterInterface.class, 
                                               HMasterInterface.versionID,
                                               masterLocation.getInetSocketAddress(), conf);
@@ -157,7 +157,7 @@ public class HClient extends HGlobals implements HConstants {
       while(rootRegionLocation == null && localTimeouts < numTimeouts) {
         rootRegionLocation = master.findRootRegion();
 
-        if(rootRegionLocation == null) {
+        if (rootRegionLocation == null) {
           try {
             Thread.sleep(clientTimeout);
 
@@ -166,7 +166,7 @@ public class HClient extends HGlobals implements HConstants {
           localTimeouts++;
         }
       }
-      if(rootRegionLocation == null) {
+      if (rootRegionLocation == null) {
        throw new IOException("Timed out trying to locate root region");
      }
      
@@ -174,7 +174,7 @@ public class HClient extends HGlobals implements HConstants {
      
      HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);

-      if(rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
+      if (rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
        tableServers = new TreeMap<Text, TableInfo>();
        tableServers.put(startRow, new TableInfo(rootRegionInfo, rootRegionLocation));
        tablesToServers.put(ROOT_TABLE_NAME, tableServers);
@@ -184,7 +184,7 @@ public class HClient extends HGlobals implements HConstants {
      
    } while(rootRegionLocation == null && tries++ < numRetries);
    
-    if(rootRegionLocation == null) {
+    if (rootRegionLocation == null) {
      closed = true;
      throw new IOException("unable to locate root region server");
    }
@@ -220,7 +220,7 @@ public class HClient extends HGlobals implements HConstants {
        HRegionInfo regionInfo = new HRegionInfo();
        regionInfo.readFields(inbuf);
        
-        if(! regionInfo.tableDesc.getName().equals(tableName)) {
+        if (!regionInfo.tableDesc.getName().equals(tableName)) {
          // We're done
          break;
        }
@@ -245,7 +245,7 @@ public class HClient extends HGlobals implements HConstants {

    HRegionInterface server = servers.get(regionServer.toString());
    
-    if(server == null) {                                // Get a connection
+    if (server == null) {                                // Get a connection
      
      server = (HRegionInterface)RPC.waitForProxy(HRegionInterface.class, 
                                                  HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
@@ -257,7 +257,7 @@ public class HClient extends HGlobals implements HConstants {

  /** Close the connection to the HRegionServer */
  public synchronized void close() throws IOException {
-    if(! closed) {
+    if (!closed) {
      RPC.stopClient();
      closed = true;
    }
@@ -274,13 +274,13 @@ public class HClient extends HGlobals implements HConstants {
    TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
    
    TreeMap<Text, TableInfo> metaTables = tablesToServers.get(META_TABLE_NAME);
-    if(metaTables == null) {
+    if (metaTables == null) {
      // Meta is not loaded yet so go do that
      loadMetaFromRoot(META_TABLE_NAME);
      metaTables = tablesToServers.get(META_TABLE_NAME);
    }

-    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext(); ) {
+    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext();) {
      TableInfo t = i.next();
      HRegionInterface server = getHRegionConnection(t.serverAddress);
      HScannerInterface scanner = null;
@@ -297,7 +297,7 @@ public class HClient extends HGlobals implements HConstants {

          // Only examine the rows where the startKey is zero length
          
-          if(info.startKey.getLength() == 0) {
+          if (info.startKey.getLength() == 0) {
            uniqueTables.add(info.tableDesc);
          }
          results.clear();
@@ -311,7 +311,7 @@ public class HClient extends HGlobals implements HConstants {
  }

  private TableInfo getTableInfo(Text row) {
-    if(tableServers == null) {
+    if (tableServers == null) {
      throw new IllegalStateException("Must open table first");
    }
    
@@ -335,7 +335,7 @@ public class HClient extends HGlobals implements HConstants {
                                                                          info.regionInfo.regionName, row, column, numVersions);
    
    ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
+    for(int i = 0; i < values.length; i++) {
      bytes.add(values[i].get());
    }
    return bytes.toArray(new byte[values.length][]);
@@ -351,7 +351,7 @@ public class HClient extends HGlobals implements HConstants {
                                                                          info.regionInfo.regionName, row, column, timestamp, numVersions);
    
    ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
+    for(int i = 0; i < values.length; i++) {
      bytes.add(values[i].get());
    }
    return bytes.toArray(new byte[values.length][]);
@@ -369,7 +369,7 @@ public class HClient extends HGlobals implements HConstants {
   * Return the specified columns.
   */
  public HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
-    if(tableServers == null) {
+    if (tableServers == null) {
      throw new IllegalStateException("Must open table first");
    }
    return new ClientScanner(columns, startRow);
@@ -481,11 +481,11 @@ public class HClient extends HGlobals implements HConstants {
     * Returns false if there are no more scanners.
     */
    private boolean nextScanner() throws IOException {
-      if(scanner != null) {
+      if (scanner != null) {
        scanner.close();
      }
      currentRegion += 1;
-      if(currentRegion == regions.length) {
+      if (currentRegion == regions.length) {
        close();
        return false;
      }
@@ -505,13 +505,13 @@ public class HClient extends HGlobals implements HConstants {
     * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
     */
    public boolean next(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
-      if(closed) {
+      if (closed) {
        return false;
      }
      boolean status = scanner.next(key, results);
-      if(! status) {
+      if (!status) {
        status = nextScanner();
-        if(status) {
+        if (status) {
          status = scanner.next(key, results);
        }
      }
@@ -522,7 +522,7 @@ public class HClient extends HGlobals implements HConstants {
     * @see org.apache.hadoop.hbase.HScannerInterface#close()
     */
    public void close() throws IOException {
-      if(scanner != null) {
+      if (scanner != null) {
        scanner.close();
      }
      server = null;

+ 17 - 17
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java

@@ -101,12 +101,12 @@ public class HLog {
       newlog.close();
     }
     
-    if(fs.exists(srcDir)) {
+    if (fs.exists(srcDir)) {
       
-      if(! fs.delete(srcDir)) {
+      if (!fs.delete(srcDir)) {
         LOG.error("Cannot delete: " + srcDir);
         
-        if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
+        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
           throw new IOException("Cannot delete: " + srcDir);
         }
       }
@@ -127,7 +127,7 @@ public class HLog {
     this.conf = conf;
     this.logSeqNum = 0;
 
-    if(fs.exists(dir)) {
+    if (fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
     fs.mkdirs(dir);
@@ -154,7 +154,7 @@ public class HLog {
 
       Vector<Path> toDeleteList = new Vector<Path>();
       synchronized(this) {
-        if(closed) {
+        if (closed) {
           throw new IOException("Cannot roll log; log is closed");
         }
 
@@ -174,10 +174,10 @@ public class HLog {
 
         // Close the current writer (if any), and grab a new one.
         
-        if(writer != null) {
+        if (writer != null) {
           writer.close();
           
-          if(filenum > 0) {
+          if (filenum > 0) {
             outputfiles.put(logSeqNum-1, computeFilename(filenum-1));
           }
         }
@@ -192,10 +192,10 @@ public class HLog {
         // over all the regions.
 
         long oldestOutstandingSeqNum = Long.MAX_VALUE;
-        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
+        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext();) {
           long curSeqNum = it.next().longValue();
           
-          if(curSeqNum < oldestOutstandingSeqNum) {
+          if (curSeqNum < oldestOutstandingSeqNum) {
            oldestOutstandingSeqNum = curSeqNum;
          }
        }
@@ -205,10 +205,10 @@ public class HLog {

        LOG.debug("removing old log files");
        
-        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
          long maxSeqNum = it.next().longValue();
          
-          if(maxSeqNum < oldestOutstandingSeqNum) {
+          if (maxSeqNum < oldestOutstandingSeqNum) {
            Path p = outputfiles.get(maxSeqNum);
            it.remove();
            toDeleteList.add(p);
@@ -221,7 +221,7 @@ public class HLog {

      // Actually delete them, if any!

-      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
+      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext();) {
        Path p = it.next();
        fs.delete(p);
      }
@@ -262,7 +262,7 @@ public class HLog {
   * We need to seize a lock on the writer so that writes are atomic.
   */
  public synchronized void append(Text regionName, Text tableName, Text row, TreeMap<Text, byte[]> columns, long timestamp) throws IOException {
-    if(closed) {
+    if (closed) {
      throw new IOException("Cannot append; log is closed");
    }
    
@@ -273,12 +273,12 @@ public class HLog {
    // that don't have any flush yet, the relevant operation is the
    // first one that's been added.
    
-    if(regionToLastFlush.get(regionName) == null) {
+    if (regionToLastFlush.get(regionName) == null) {
      regionToLastFlush.put(regionName, seqNum[0]);
    }

    int counter = 0;
-    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
      Text column = it.next();
      byte[] val = columns.get(column);
      HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum[counter++]);
@@ -333,11 +333,11 @@ public class HLog {

  /** Complete the cache flush */
  public synchronized void completeCacheFlush(Text regionName, Text tableName, long logSeqId) throws IOException {
-    if(closed) {
+    if (closed) {
      return;
    }
    
-    if(! insideCacheFlush) {
+    if (!insideCacheFlush) {
      throw new IOException("Impossible situation: inside completeCacheFlush(), but 'insideCacheFlush' flag is false");
    }
    

+ 2 - 2
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java

@@ -80,10 +80,10 @@ public class HLogKey implements WritableComparable {
     HLogKey other = (HLogKey) o;
     int result = this.regionName.compareTo(other.regionName);
     
-    if(result == 0) {
+    if (result == 0) {
       result = this.row.compareTo(other.row);
       
-      if(result == 0) {
+      if (result == 0) {
         
         if (this.logSeqNum < other.logSeqNum) {
           result = -1;

+ 46 - 46
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java

@@ -108,7 +108,7 @@ public class HMaster extends HGlobals
       };
       };
       Text firstRow = new Text();
       Text firstRow = new Text();
   
   
-      while((! closed)) {
+      while((!closed)) {
         int metaRegions = 0;
         int metaRegions = 0;
         while(rootRegionLocation == null) {
         while(rootRegionLocation == null) {
           try {
           try {
@@ -155,8 +155,8 @@ public class HMaster extends HGlobals
             HServerInfo storedInfo = null;
             HServerInfo storedInfo = null;
             synchronized(serversToServerInfo) {
             synchronized(serversToServerInfo) {
               storedInfo = serversToServerInfo.get(serverName);
               storedInfo = serversToServerInfo.get(serverName);
-              if(storedInfo == null
-                 || storedInfo.getStartCode() != startCode) {
+              if (storedInfo == null
+                  || storedInfo.getStartCode() != startCode) {
               
               
                 // The current assignment is no good; load the region.
                 // The current assignment is no good; load the region.
   
   
@@ -261,8 +261,8 @@ public class HMaster extends HGlobals
           HServerInfo storedInfo = null;
           HServerInfo storedInfo = null;
           synchronized(serversToServerInfo) {
           synchronized(serversToServerInfo) {
             storedInfo = serversToServerInfo.get(serverName);
             storedInfo = serversToServerInfo.get(serverName);
-            if(storedInfo == null
-               || storedInfo.getStartCode() != startCode) {
+            if (storedInfo == null
+                || storedInfo.getStartCode() != startCode) {
             
             
               // The current assignment is no good; load the region.
               // The current assignment is no good; load the region.
 
 
@@ -285,16 +285,16 @@ public class HMaster extends HGlobals
     }
     }
 
 
     public void run() {
     public void run() {
-      while((! closed)) {
+      while((!closed)) {
         MetaRegion region = null;
         MetaRegion region = null;
         
         
         while(region == null) {
         while(region == null) {
           synchronized(metaRegionsToScan) {
           synchronized(metaRegionsToScan) {
-            if(metaRegionsToScan.size() != 0) {
+            if (metaRegionsToScan.size() != 0) {
               region = metaRegionsToScan.remove(0);
               region = metaRegionsToScan.remove(0);
             }
             }
           }
           }
-          if(region == null) {
+          if (region == null) {
             try {
             try {
               metaRegionsToScan.wait();
               metaRegionsToScan.wait();
               
               
@@ -307,7 +307,7 @@ public class HMaster extends HGlobals
         
         
         synchronized(knownMetaRegions) {
         synchronized(knownMetaRegions) {
           knownMetaRegions.put(region.startKey, region);
           knownMetaRegions.put(region.startKey, region);
-          if(rootScanned && knownMetaRegions.size() == numMetaRegions) {
+          if (rootScanned && knownMetaRegions.size() == numMetaRegions) {
             allMetaRegionsScanned = true;
             allMetaRegionsScanned = true;
             allMetaRegionsScanned.notifyAll();
             allMetaRegionsScanned.notifyAll();
           }
           }
@@ -319,7 +319,7 @@ public class HMaster extends HGlobals
           
           
           } catch(InterruptedException ex) {
           } catch(InterruptedException ex) {
           }
           }
-          if(! allMetaRegionsScanned) {
+          if (!allMetaRegionsScanned) {
             break;                              // A region must have split
             break;                              // A region must have split
           }
           }
           
           
@@ -328,7 +328,7 @@ public class HMaster extends HGlobals
           Vector<MetaRegion> v = new Vector<MetaRegion>();
           Vector<MetaRegion> v = new Vector<MetaRegion>();
           v.addAll(knownMetaRegions.values());
           v.addAll(knownMetaRegions.values());
           
           
-          for(Iterator<MetaRegion> i = v.iterator(); i.hasNext(); ) {
+          for(Iterator<MetaRegion> i = v.iterator(); i.hasNext();) {
             scanRegion(i.next());
             scanRegion(i.next());
           }
           }
         } while(true);
         } while(true);
@@ -391,12 +391,12 @@ public class HMaster extends HGlobals
 
 
     // Make sure the root directory exists!
     // Make sure the root directory exists!
     
     
-    if(! fs.exists(dir)) {
+    if (!fs.exists(dir)) {
       fs.mkdirs(dir);
       fs.mkdirs(dir);
     }
     }
 
 
     Path rootRegionDir = HStoreFile.getHRegionDir(dir, rootRegionInfo.regionName);
     Path rootRegionDir = HStoreFile.getHRegionDir(dir, rootRegionInfo.regionName);
-    if(! fs.exists(rootRegionDir)) {
+    if (!fs.exists(rootRegionDir)) {
       
       
       // Bootstrap! Need to create the root region and the first meta region.
       // Bootstrap! Need to create the root region and the first meta region.
       //TODO is the root region self referential?
       //TODO is the root region self referential?
@@ -521,7 +521,7 @@ public class HMaster extends HGlobals
     synchronized(serversToServerInfo) {
     synchronized(serversToServerInfo) {
       storedInfo = serversToServerInfo.get(server);
       storedInfo = serversToServerInfo.get(server);
         
         
-      if(storedInfo != null) {
+      if (storedInfo != null) {
         serversToServerInfo.remove(server);
         serversToServerInfo.remove(server);
 
 
         synchronized(msgQueue) {
         synchronized(msgQueue) {
@@ -548,7 +548,7 @@ public class HMaster extends HGlobals
     synchronized(serversToServerInfo) {
     synchronized(serversToServerInfo) {
       HServerInfo storedInfo = serversToServerInfo.get(server);
       HServerInfo storedInfo = serversToServerInfo.get(server);
       
       
-      if(storedInfo == null) {
+      if (storedInfo == null) {
         
         
         // The HBaseMaster may have been restarted.
         // The HBaseMaster may have been restarted.
         // Tell the RegionServer to start over and call regionServerStartup()
         // Tell the RegionServer to start over and call regionServerStartup()
@@ -557,7 +557,7 @@ public class HMaster extends HGlobals
         returnMsgs[0] = new HMsg(HMsg.MSG_CALL_SERVER_STARTUP);
         returnMsgs[0] = new HMsg(HMsg.MSG_CALL_SERVER_STARTUP);
         return returnMsgs;
         return returnMsgs;
         
         
-      } else if(storedInfo.getStartCode() != serverInfo.getStartCode()) {
+      } else if (storedInfo.getStartCode() != serverInfo.getStartCode()) {
         
         
         // This state is reachable if:
         // This state is reachable if:
         //
         //
@@ -597,9 +597,9 @@ public class HMaster extends HGlobals
     // Process the kill list
     // Process the kill list
     
     
     TreeMap<Text, HRegionInfo> regionsToKill = killList.get(info.toString());
     TreeMap<Text, HRegionInfo> regionsToKill = killList.get(info.toString());
-    if(regionsToKill != null) {
+    if (regionsToKill != null) {
       for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
       for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
         
         
         returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
         returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
       }
       }
@@ -616,7 +616,7 @@ public class HMaster extends HGlobals
         case HMsg.MSG_REPORT_OPEN:
         case HMsg.MSG_REPORT_OPEN:
           HRegionInfo regionInfo = unassignedRegions.get(region.regionName);
           HRegionInfo regionInfo = unassignedRegions.get(region.regionName);
 
 
-          if(regionInfo == null) {
+          if (regionInfo == null) {
 
 
             // This Region should not have been opened.
             // This Region should not have been opened.
             // Ask the server to shut it down, but don't report it as closed.  
             // Ask the server to shut it down, but don't report it as closed.  
@@ -632,7 +632,7 @@ public class HMaster extends HGlobals
             unassignedRegions.remove(region.regionName);
             unassignedRegions.remove(region.regionName);
             assignAttempts.remove(region.regionName);
             assignAttempts.remove(region.regionName);
 
 
-            if(region.regionName.compareTo(rootRegionInfo.regionName) == 0) {
+            if (region.regionName.compareTo(rootRegionInfo.regionName) == 0) {
 
 
               // Store the Root Region location (in memory)
               // Store the Root Region location (in memory)
 
 
@@ -643,7 +643,7 @@ public class HMaster extends HGlobals
               rootRegionLocation.notifyAll();
               rootRegionLocation.notifyAll();
               break;
               break;
               
               
-            } else if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
+            } else if (region.regionName.find(META_TABLE_NAME.toString()) == 0) {
 
 
               // It's a meta region. Put it on the queue to be scanned.
               // It's a meta region. Put it on the queue to be scanned.
               
               
@@ -668,7 +668,7 @@ public class HMaster extends HGlobals
           break;
           break;
 
 
         case HMsg.MSG_REPORT_CLOSE:
         case HMsg.MSG_REPORT_CLOSE:
-          if(region.regionName.compareTo(rootRegionInfo.regionName) == 0) { // Root region
+          if (region.regionName.compareTo(rootRegionInfo.regionName) == 0) { // Root region
             rootRegionLocation = null;
             rootRegionLocation = null;
             unassignedRegions.put(region.regionName, region);
             unassignedRegions.put(region.regionName, region);
             assignAttempts.put(region.regionName, 0L);
             assignAttempts.put(region.regionName, 0L);
@@ -676,10 +676,10 @@ public class HMaster extends HGlobals
           } else {
           } else {
             boolean reassignRegion = true;
             boolean reassignRegion = true;
             
             
-            if(regionsToKill.containsKey(region.regionName)) {
+            if (regionsToKill.containsKey(region.regionName)) {
               regionsToKill.remove(region.regionName);
               regionsToKill.remove(region.regionName);
               
               
-              if(regionsToKill.size() > 0) {
+              if (regionsToKill.size() > 0) {
                 killList.put(info.toString(), regionsToKill);
                 killList.put(info.toString(), regionsToKill);
                 
                 
               } else {
               } else {
@@ -701,7 +701,7 @@ public class HMaster extends HGlobals
           break;
           break;
 
 
         case HMsg.MSG_NEW_REGION:
         case HMsg.MSG_NEW_REGION:
-          if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
+          if (region.regionName.find(META_TABLE_NAME.toString()) == 0) {
             // A meta region has split.
             // A meta region has split.
             
             
             allMetaRegionsScanned = false;
             allMetaRegionsScanned = false;
@@ -720,7 +720,7 @@ public class HMaster extends HGlobals
 
 
       // Figure out what the RegionServer ought to do, and write back.
       // Figure out what the RegionServer ought to do, and write back.
 
 
-      if(unassignedRegions.size() > 0) {
+      if (unassignedRegions.size() > 0) {
 
 
         // Open new regions as necessary
         // Open new regions as necessary
 
 
@@ -731,20 +731,20 @@ public class HMaster extends HGlobals
         long now = System.currentTimeMillis();
         long now = System.currentTimeMillis();
 
 
         for(Iterator<Text> it = unassignedRegions.keySet().iterator();
         for(Iterator<Text> it = unassignedRegions.keySet().iterator();
-            it.hasNext(); ) {
+            it.hasNext();) {
 
 
           Text curRegionName = it.next();
           Text curRegionName = it.next();
           HRegionInfo regionInfo = unassignedRegions.get(curRegionName);
           HRegionInfo regionInfo = unassignedRegions.get(curRegionName);
           long assignedTime = assignAttempts.get(curRegionName);
           long assignedTime = assignAttempts.get(curRegionName);
 
 
-          if(now - assignedTime > maxRegionOpenTime) {
+          if (now - assignedTime > maxRegionOpenTime) {
             returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
             returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
 
 
             assignAttempts.put(curRegionName, now);
             assignAttempts.put(curRegionName, now);
             counter++;
             counter++;
           }
           }
 
 
-          if(counter >= targetForServer) {
+          if (counter >= targetForServer) {
             break;
             break;
           }
           }
         }
         }
@@ -762,7 +762,7 @@ public class HMaster extends HGlobals
     }
     }
     
     
     public void run() {
     public void run() {
-      while(! closed) {
+      while(!closed) {
         PendingOperation op = null;
         PendingOperation op = null;
         
         
         synchronized(msgQueue) {
         synchronized(msgQueue) {
@@ -827,7 +827,7 @@ public class HMaster extends HGlobals
           byte serverBytes[] = results.get(META_COL_SERVER);
           byte serverBytes[] = results.get(META_COL_SERVER);
           String serverName = new String(serverBytes, UTF8_ENCODING);
           String serverName = new String(serverBytes, UTF8_ENCODING);
 
 
-          if(deadServer.compareTo(serverName) != 0) {
+          if (deadServer.compareTo(serverName) != 0) {
             // This isn't the server you're looking for - move along
             // This isn't the server you're looking for - move along
             continue;
             continue;
           }
           }
@@ -835,7 +835,7 @@ public class HMaster extends HGlobals
           byte startCodeBytes[] = results.get(META_COL_STARTCODE);
           byte startCodeBytes[] = results.get(META_COL_STARTCODE);
           long startCode = Long.decode(new String(startCodeBytes, UTF8_ENCODING));
           long startCode = Long.decode(new String(startCodeBytes, UTF8_ENCODING));
 
 
-          if(oldStartCode != startCode) {
+          if (oldStartCode != startCode) {
             // Close but no cigar
             // Close but no cigar
             continue;
             continue;
           }
           }
@@ -869,7 +869,7 @@ public class HMaster extends HGlobals
       // Put all the regions we found on the unassigned region list
       // Put all the regions we found on the unassigned region list
 
 
       for(Iterator<Map.Entry<Text, HRegionInfo>> i = regions.entrySet().iterator();
       for(Iterator<Map.Entry<Text, HRegionInfo>> i = regions.entrySet().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
 
 
         Map.Entry<Text, HRegionInfo> e = i.next();
         Map.Entry<Text, HRegionInfo> e = i.next();
         Text region = e.getKey();
         Text region = e.getKey();
@@ -903,7 +903,7 @@ public class HMaster extends HGlobals
       
       
       scanMetaRegion(server, scanner, rootRegionInfo.regionName);
       scanMetaRegion(server, scanner, rootRegionInfo.regionName);
       for(Iterator<MetaRegion> i = knownMetaRegions.values().iterator();
       for(Iterator<MetaRegion> i = knownMetaRegions.values().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
         
         
         MetaRegion r = i.next();
         MetaRegion r = i.next();
 
 
@@ -929,7 +929,7 @@ public class HMaster extends HGlobals
       // If the region closing down is a meta region then we need to update
       // If the region closing down is a meta region then we need to update
       // the ROOT table
       // the ROOT table
       
       
-      if(this.regionInfo.regionName.find(metaTableDesc.getName().toString()) == 0) {
+      if (this.regionInfo.regionName.find(metaTableDesc.getName().toString()) == 0) {
         this.rootRegion = true;
         this.rootRegion = true;
         
         
       } else {
       } else {
@@ -954,7 +954,7 @@ public class HMaster extends HGlobals
 
 
       Text metaRegionName;
       Text metaRegionName;
       HRegionInterface server;
       HRegionInterface server;
-      if(rootRegion) {
+      if (rootRegion) {
         metaRegionName = rootRegionInfo.regionName;
         metaRegionName = rootRegionInfo.regionName;
         server = client.getHRegionConnection(rootRegionLocation);
         server = client.getHRegionConnection(rootRegionLocation);
         
         
@@ -969,7 +969,7 @@ public class HMaster extends HGlobals
       server.delete(metaRegionName, clientId, lockid, META_COL_STARTCODE);
       server.delete(metaRegionName, clientId, lockid, META_COL_STARTCODE);
       server.commit(metaRegionName, clientId, lockid);
       server.commit(metaRegionName, clientId, lockid);
       
       
-      if(reassignRegion) {
+      if (reassignRegion) {
         synchronized(unassignedRegions) {
         synchronized(unassignedRegions) {
           unassignedRegions.put(regionInfo.regionName, regionInfo);
           unassignedRegions.put(regionInfo.regionName, regionInfo);
           assignAttempts.put(regionInfo.regionName, 0L);
           assignAttempts.put(regionInfo.regionName, 0L);
@@ -986,7 +986,7 @@ public class HMaster extends HGlobals
     BytesWritable startCode;
     BytesWritable startCode;
     
     
     public PendingOpenReport(HServerInfo info, Text regionName) {
     public PendingOpenReport(HServerInfo info, Text regionName) {
-      if(regionName.find(metaTableDesc.getName().toString()) == 0) {
+      if (regionName.find(metaTableDesc.getName().toString()) == 0) {
         
         
         // The region which just came on-line is a META region.
         // The region which just came on-line is a META region.
         // We need to look in the ROOT region for its information.
         // We need to look in the ROOT region for its information.
@@ -1030,7 +1030,7 @@ public class HMaster extends HGlobals
 
 
       Text metaRegionName;
       Text metaRegionName;
       HRegionInterface server;
       HRegionInterface server;
-      if(rootRegion) {
+      if (rootRegion) {
         metaRegionName = rootRegionInfo.regionName;
         metaRegionName = rootRegionInfo.regionName;
         server = client.getHRegionConnection(rootRegionLocation);
         server = client.getHRegionConnection(rootRegionLocation);
         
         
@@ -1074,13 +1074,13 @@ public class HMaster extends HGlobals
 
 
 
 
     BytesWritable bytes = server.get(metaRegionName, desc.getName(), META_COL_REGIONINFO);
     BytesWritable bytes = server.get(metaRegionName, desc.getName(), META_COL_REGIONINFO);
-    if(bytes != null && bytes.getSize() != 0) {
+    if (bytes != null && bytes.getSize() != 0) {
       byte[] infoBytes = bytes.get();
      DataInputBuffer inbuf = new DataInputBuffer();
      inbuf.reset(infoBytes, infoBytes.length);
      HRegionInfo info = new HRegionInfo();
      info.readFields(inbuf);
-      if(info.tableDesc.getName().compareTo(desc.getName()) == 0) {
+      if (info.tableDesc.getName().compareTo(desc.getName()) == 0) {
         throw new IOException("table already exists");
      }
    }
@@ -1183,7 +1183,7 @@ public class HMaster extends HGlobals
     }
 
    for(Iterator<MetaRegion> i = knownMetaRegions.tailMap(tableName).values().iterator();
-        i.hasNext(); ) {
+        i.hasNext();) {
 
      // Find all the regions that make up this table
      
@@ -1206,7 +1206,7 @@ public class HMaster extends HGlobals
           HRegionInfo info = new HRegionInfo();
          info.readFields(inbuf);
 
-          if(info.tableDesc.getName().compareTo(tableName) > 0) {
+          if (info.tableDesc.getName().compareTo(tableName) > 0) {
             break;                      // Beyond any more entries for this table
          }
 
@@ -1220,12 +1220,12 @@ public class HMaster extends HGlobals
 
          synchronized(serversToServerInfo) {
            HServerInfo s = serversToServerInfo.get(serverName);
-            if(s != null && s.getStartCode() == startCode) {
+            if (s != null && s.getStartCode() == startCode) {
               
              // It is being served. Tell the server to stop it and not report back
              
              TreeMap<Text, HRegionInfo> regionsToKill = killList.get(serverName);
-              if(regionsToKill == null) {
+              if (regionsToKill == null) {
                 regionsToKill = new TreeMap<Text, HRegionInfo>();
                }
                regionsToKill.put(info.regionName, info);
@@ -1233,7 +1233,7 @@ public class HMaster extends HGlobals
             }
            }
          }
-        for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext(); ) {
+        for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext();) {
           long lockid = server.startUpdate(m.regionName, clientId, row.next());
          server.delete(m.regionName, clientId, lockid, columns[0]);
          server.commit(m.regionName, clientId, lockid);

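Taken together, the HMaster hunks apply two mechanical spacing rules: a space goes between the if keyword and its condition, and the space before the closing parenthesis of a for header with an empty increment clause is dropped. A minimal compilable sketch of the resulting style (the names below are hypothetical, not code from the commit):

import java.util.Iterator;
import java.util.List;

class SpacingSketch {
  // "if (" takes a space; "for(" and "it.hasNext();)" do not.
  static int countNonNull(List<Object> items) {
    int n = 0;
    for(Iterator<Object> it = items.iterator(); it.hasNext();) {
      if (it.next() != null) {
        n++;
      }
    }
    return n;
  }
}
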
+ 23 - 23
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java

@@ -65,10 +65,10 @@ public class HMemcache {
 
    locking.obtainWriteLock();
    try {
-      if(snapshot != null) {
+      if (snapshot != null) {
         throw new IOException("Snapshot in progress!");
      }
-      if(memcache.size() == 0) {
+      if (memcache.size() == 0) {
         LOG.debug("memcache empty. Skipping snapshot");
        return retval;
      }
@@ -99,16 +99,16 @@ public class HMemcache {
     locking.obtainWriteLock();
 
    try {
-      if(snapshot == null) {
+      if (snapshot == null) {
         throw new IOException("Snapshot not present!");
      }
      LOG.debug("deleting snapshot");
      
      for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator(); 
-          it.hasNext(); ) {
+          it.hasNext();) {
         
        TreeMap<HStoreKey, BytesWritable> cur = it.next();
-        if(snapshot == cur) {
+        if (snapshot == cur) {
           it.remove();
          break;
        }
@@ -130,7 +130,7 @@ public class HMemcache {
   public void add(Text row, TreeMap<Text, byte[]> columns, long timestamp) {
    locking.obtainWriteLock();
    try {
-      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
+      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
         Text column = it.next();
        byte[] val = columns.get(column);
 
@@ -156,7 +156,7 @@ public class HMemcache {
       results.addAll(0, result);
 
      for(int i = history.size()-1; i >= 0; i--) {
-        if(numVersions > 0 && results.size() >= numVersions) {
+        if (numVersions > 0 && results.size() >= numVersions) {
           break;
        }
        
@@ -164,7 +164,7 @@ public class HMemcache {
         results.addAll(results.size(), result);
      }
      
-      if(results.size() == 0) {
+      if (results.size() == 0) {
         return null;
        
      } else {
@@ -203,16 +203,16 @@ public class HMemcache {
     
    SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(key);
    
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
       HStoreKey itKey = it.next();
      Text itCol = itKey.getColumn();
 
-      if(results.get(itCol) == null
-         && key.matchesWithoutColumn(itKey)) {
+      if (results.get(itCol) == null
+          && key.matchesWithoutColumn(itKey)) {
         BytesWritable val = tailMap.get(itKey);
        results.put(itCol, val.get());
        
-      } else if(key.getRow().compareTo(itKey.getRow()) > 0) {
+      } else if (key.getRow().compareTo(itKey.getRow()) > 0) {
         break;
      }
    }
@@ -232,15 +232,15 @@ public class HMemcache {
     HStoreKey curKey = new HStoreKey(key.getRow(), key.getColumn(), key.getTimestamp());
    SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(curKey);
 
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
       HStoreKey itKey = it.next();
      
-      if(itKey.matchesRowCol(curKey)) {
+      if (itKey.matchesRowCol(curKey)) {
         result.add(tailMap.get(itKey).get());
        curKey.setVersion(itKey.getTimestamp() - 1);
      }
      
-      if(numVersions > 0 && result.size() >= numVersions) {
+      if (numVersions > 0 && result.size() >= numVersions) {
         break;
      }
    }
@@ -266,7 +266,7 @@ public class HMemcache {
     Iterator<HStoreKey> keyIterators[];
 
    @SuppressWarnings("unchecked")
-      public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
+    public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
       throws IOException {
      
      super(timestamp, targetCols);
@@ -276,7 +276,7 @@ public class HMemcache {
         this.backingMaps = new TreeMap[history.size() + 1];
        int i = 0;
        for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator();
-            it.hasNext(); ) {
+            it.hasNext();) {
           
          backingMaps[i++] = it.next();
        }
@@ -290,7 +290,7 @@ public class HMemcache {
 
        HStoreKey firstKey = new HStoreKey(firstRow);
        for(i = 0; i < backingMaps.length; i++) {
-          if(firstRow.getLength() != 0) {
+          if (firstRow.getLength() != 0) {
             keyIterators[i] = backingMaps[i].tailMap(firstKey).keySet().iterator();
            
          } else {
@@ -298,10 +298,10 @@ public class HMemcache {
           }
          
          while(getNext(i)) {
-            if(! findFirstRow(i, firstRow)) {
+            if (!findFirstRow(i, firstRow)) {
               continue;
            }
-            if(columnMatch(i)) {
+            if (columnMatch(i)) {
               break;
            }
          }
@@ -331,7 +331,7 @@ public class HMemcache {
      * @return - true if there is more data available
     */
    boolean getNext(int i) {
-      if(! keyIterators[i].hasNext()) {
+      if (!keyIterators[i].hasNext()) {
         closeSubScanner(i);
        return false;
      }
@@ -350,10 +350,10 @@ public class HMemcache {
 
    /** Shut down map iterators, and release the lock */
    public void close() throws IOException {
-      if(! scannerClosed) {
+      if (!scannerClosed) {
         try {
          for(int i = 0; i < keys.length; i++) {
-            if(keyIterators[i] != null) {
+            if (keyIterators[i] != null) {
               closeSubScanner(i);
            }
          }

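The HMemcache hunks add two further rules: the negation operator hugs its operand (!scannerClosed, never ! scannerClosed), and a condition wrapped across lines is indented so the continuation sits under the first term inside "if (". A small sketch under those assumptions (hypothetical names, not code from the commit):

class NegationSketch {
  // "!" binds tightly; the wrapped condition's second line aligns under "a".
  static boolean bothPresent(String a, String b) {
    if (a == null
        || b == null) {
      return false;
    }
    return !a.isEmpty() && !b.isEmpty();
  }
}
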
+ 102 - 102
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java

@@ -61,21 +61,21 @@ public class HRegion implements HConstants {
     // Make sure that srcA comes first; important for key-ordering during
    // write of the merged file.
    
-    if(srcA.getStartKey() == null) {
-      if(srcB.getStartKey() == null) {
+    if (srcA.getStartKey() == null) {
+      if (srcB.getStartKey() == null) {
         throw new IOException("Cannot merge two regions with null start key");
      }
      // A's start key is null but B's isn't. Assume A comes before B
      
-    } else if((srcB.getStartKey() == null)         // A is not null but B is
-        || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
+    } else if ((srcB.getStartKey() == null)         // A is not null but B is
+               || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
       
      HRegion tmp = srcA;
      srcA = srcB;
      srcB = tmp;
    }
    
-    if (! srcA.getEndKey().equals(srcB.getStartKey())) {
+    if (!srcA.getEndKey().equals(srcB.getStartKey())) {
       throw new IOException("Cannot merge non-adjacent regions");
    }
 
@@ -89,7 +89,7 @@ public class HRegion implements HConstants {
     Text endKey = srcB.getEndKey();
 
    Path merges = new Path(srcA.getRegionDir(), MERGEDIR);
-    if(! fs.exists(merges)) {
+    if (!fs.exists(merges)) {
       fs.mkdirs(merges);
    }
    
@@ -98,14 +98,14 @@ public class HRegion implements HConstants {
     
    Path newRegionDir = HStoreFile.getHRegionDir(merges, newRegionInfo.regionName);
 
-    if(fs.exists(newRegionDir)) {
+    if (fs.exists(newRegionDir)) {
       throw new IOException("Cannot merge; target file collision at " + newRegionDir);
    }
 
     LOG.info("starting merge of regions: " + srcA.getRegionName() + " and " 
-        + srcB.getRegionName() + " new region start key is '" 
-        + (startKey == null ? "" : startKey) + "', end key is '" 
-        + (endKey == null ? "" : endKey) + "'");
+             + srcB.getRegionName() + " new region start key is '" 
+             + (startKey == null ? "" : startKey) + "', end key is '" 
+             + (endKey == null ? "" : endKey) + "'");
     
    // Flush each of the sources, and merge their files into a single 
    // target for each column family.
@@ -114,10 +114,10 @@ public class HRegion implements HConstants {
     
    TreeSet<HStoreFile> alreadyMerged = new TreeSet<HStoreFile>();
    TreeMap<Text, Vector<HStoreFile>> filesToMerge = new TreeMap<Text, Vector<HStoreFile>>();
-    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext();) {
       HStoreFile src = it.next();
      Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if(v == null) {
+      if (v == null) {
         v = new Vector<HStoreFile>();
        filesToMerge.put(src.getColFamily(), v);
      }
@@ -126,10 +126,10 @@ public class HRegion implements HConstants {
     
    LOG.debug("flushing and getting file names for region " + srcB.getRegionName());
    
-    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext();) {
       HStoreFile src = it.next();
      Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if(v == null) {
+      if (v == null) {
         v = new Vector<HStoreFile>();
        filesToMerge.put(src.getColFamily(), v);
      }
@@ -138,11 +138,11 @@ public class HRegion implements HConstants {
     
    LOG.debug("merging stores");
    
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
      Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
      HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName, 
-          colFamily, Math.abs(rand.nextLong()));
+                                      colFamily, Math.abs(rand.nextLong()));
       
      dst.mergeStoreFiles(srcFiles, fs, conf);
      alreadyMerged.addAll(srcFiles);
@@ -153,15 +153,15 @@ public class HRegion implements HConstants {
     // of any last-minute inserts
 
    LOG.debug("flushing changes since start of merge for region " 
-        + srcA.getRegionName());
+              + srcA.getRegionName());
 
    filesToMerge.clear();
-    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext();) {
       HStoreFile src = it.next();
      
-      if(! alreadyMerged.contains(src)) {
+      if (!alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if(v == null) {
+        if (v == null) {
           v = new Vector<HStoreFile>();
          filesToMerge.put(src.getColFamily(), v);
        }
@@ -170,14 +170,14 @@ public class HRegion implements HConstants {
     }
    
    LOG.debug("flushing changes since start of merge for region " 
-        + srcB.getRegionName());
+              + srcB.getRegionName());
     
-    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext();) {
       HStoreFile src = it.next();
      
-      if(! alreadyMerged.contains(src)) {
+      if (!alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if(v == null) {
+        if (v == null) {
           v = new Vector<HStoreFile>();
          filesToMerge.put(src.getColFamily(), v);
        }
@@ -187,11 +187,11 @@ public class HRegion implements HConstants {
     
    LOG.debug("merging changes since start of merge");
    
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
      Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
      HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
-          colFamily, Math.abs(rand.nextLong()));
+                                      colFamily, Math.abs(rand.nextLong()));
       
      dst.mergeStoreFiles(srcFiles, fs, conf);
    }
@@ -199,7 +199,7 @@ public class HRegion implements HConstants {
     // Done
    
    HRegion dstRegion = new HRegion(dir, log, fs, conf, newRegionInfo,
-        newRegionDir, null);
+                                    newRegionDir, null);
 
    // Get rid of merges directory
    
@@ -284,7 +284,7 @@ public class HRegion implements HConstants {
    * written-to before), then read it from the supplied path.
   */
  public HRegion(Path dir, HLog log, FileSystem fs, Configuration conf, 
-      HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
+                 HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
     
    this.dir = dir;
    this.log = log;
@@ -303,29 +303,29 @@ public class HRegion implements HConstants {
 
    // Move prefab HStore files into place (if any)
    
-    if(initialFiles != null && fs.exists(initialFiles)) {
+    if (initialFiles != null && fs.exists(initialFiles)) {
       fs.rename(initialFiles, regiondir);
    }
 
    // Load in all the HStores.
    
    for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext(); ) {
+        it.hasNext();) {
       
      Text colFamily = it.next();
      stores.put(colFamily, new HStore(dir, this.regionInfo.regionName, colFamily, 
-          this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
+                                       this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
     }
 
    // Get rid of any splits or merges that were lost in-progress
    
    Path splits = new Path(regiondir, SPLITDIR);
-    if(fs.exists(splits)) {
+    if (fs.exists(splits)) {
       fs.delete(splits);
    }
    
    Path merges = new Path(regiondir, MERGEDIR);
-    if(fs.exists(merges)) {
+    if (fs.exists(merges)) {
       fs.delete(merges);
    }
 
@@ -362,7 +362,7 @@ public class HRegion implements HConstants {
   public Vector<HStoreFile> close() throws IOException {
    boolean shouldClose = false;
    synchronized(writestate) {
-      if(writestate.closed) {
+      if (writestate.closed) {
         LOG.info("region " + this.regionInfo.regionName + " closed");
        return new Vector<HStoreFile>();
      }
@@ -376,13 +376,13 @@ public class HRegion implements HConstants {
       shouldClose = true;
    }
 
-    if(! shouldClose) {
+    if (!shouldClose) {
       return null;
      
    } else {
      LOG.info("closing region " + this.regionInfo.regionName);
      Vector<HStoreFile> allHStoreFiles = internalFlushcache();
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
         HStore store = it.next();
        store.close();
      }
@@ -406,8 +406,8 @@ public class HRegion implements HConstants {
    * Returns two brand-new (and open) HRegions
   */
  public HRegion[] closeAndSplit(Text midKey) throws IOException {
-    if(((regionInfo.startKey.getLength() != 0)
-        && (regionInfo.startKey.compareTo(midKey) > 0))
+    if (((regionInfo.startKey.getLength() != 0)
+         && (regionInfo.startKey.compareTo(midKey) > 0))
         || ((regionInfo.endKey.getLength() != 0)
            && (regionInfo.endKey.compareTo(midKey) < 0))) {
      throw new IOException("Region splitkey must lie within region boundaries.");
@@ -419,13 +419,13 @@ public class HRegion implements HConstants {
     // or compactions until close() is called.
    
    Path splits = new Path(regiondir, SPLITDIR);
-    if(! fs.exists(splits)) {
+    if (!fs.exists(splits)) {
       fs.mkdirs(splits);
    }
    
    long regionAId = Math.abs(rand.nextLong());
    HRegionInfo regionAInfo = new HRegionInfo(regionAId, regionInfo.tableDesc, 
-        regionInfo.startKey, midKey);
+                                              regionInfo.startKey, midKey);
         
    long regionBId = Math.abs(rand.nextLong());
    HRegionInfo regionBInfo
@@ -434,24 +434,24 @@ public class HRegion implements HConstants {
     Path dirA = HStoreFile.getHRegionDir(splits, regionAInfo.regionName);
    Path dirB = HStoreFile.getHRegionDir(splits, regionBInfo.regionName);
 
-    if(fs.exists(dirA) || fs.exists(dirB)) {
+    if (fs.exists(dirA) || fs.exists(dirB)) {
       throw new IOException("Cannot split; target file collision at " + dirA 
-          + " or " + dirB);
+                            + " or " + dirB);
     }
    
    TreeSet<HStoreFile> alreadySplit = new TreeSet<HStoreFile>();
    Vector<HStoreFile> hstoreFilesToSplit = flushcache(true);
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
      
      LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
-          + "/" + hsf.fileId());
+                + "/" + hsf.fileId());
 
      HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                       hsf.getColFamily(), Math.abs(rand.nextLong()));
       
      HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                       hsf.getColFamily(), Math.abs(rand.nextLong()));
       
      hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
      alreadySplit.add(hsf);
@@ -461,18 +461,18 @@ public class HRegion implements HConstants {
     // and copy the small remainder
    
    hstoreFilesToSplit = close();
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
      
-      if(! alreadySplit.contains(hsf)) {
+      if (!alreadySplit.contains(hsf)) {
         LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
-            + "/" + hsf.fileId());
+                  + "/" + hsf.fileId());
 
        HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-            hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                         hsf.getColFamily(), Math.abs(rand.nextLong()));
         
        HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-            hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                         hsf.getColFamily(), Math.abs(rand.nextLong()));
         
        hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
      }
@@ -494,7 +494,7 @@ public class HRegion implements HConstants {
     regions[1] = regionB;
    
    LOG.info("region split complete. new regions are: " + regions[0].getRegionName()
-        + ", " + regions[1].getRegionName());
+             + ", " + regions[1].getRegionName());
     
    return regions;
  }
@@ -565,10 +565,10 @@ public class HRegion implements HConstants {
     Text key = new Text();
    long maxSize = 0;
 
-    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
+    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext();) {
       long size = i.next().getLargestFileSize(key);
      
-      if(size > maxSize) {                      // Largest so far
+      if (size > maxSize) {                      // Largest so far
         maxSize = size;
        midKey.set(key);
      }
@@ -593,9 +593,9 @@ public class HRegion implements HConstants {
   public boolean compactStores() throws IOException {
    boolean shouldCompact = false;
    synchronized(writestate) {
-      if((! writestate.writesOngoing)
+      if ((!writestate.writesOngoing)
           && writestate.writesEnabled
-          && (! writestate.closed)
+          && (!writestate.closed)
           && recentCommits > MIN_COMMITS_FOR_COMPACTION) {
        
        writestate.writesOngoing = true;
@@ -603,14 +603,14 @@ public class HRegion implements HConstants {
       }
    }
 
-    if(! shouldCompact) {
+    if (!shouldCompact) {
       LOG.info("not compacting region " + this.regionInfo.regionName);
      return false;
      
    } else {
      try {
        LOG.info("starting compaction on region " + this.regionInfo.regionName);
-        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
           HStore store = it.next();
          store.compact();
        }
@@ -632,7 +632,7 @@ public class HRegion implements HConstants {
    * only take if there have been a lot of uncommitted writes.
   */
  public void optionallyFlush() throws IOException {
-    if(commitsSinceFlush > maxUnflushedEntries) {
+    if (commitsSinceFlush > maxUnflushedEntries) {
       flushcache(false);
    }
  }
@@ -657,20 +657,20 @@ public class HRegion implements HConstants {
   public Vector<HStoreFile> flushcache(boolean disableFutureWrites) throws IOException {
    boolean shouldFlush = false;
    synchronized(writestate) {
-      if((! writestate.writesOngoing)
+      if ((!writestate.writesOngoing)
           && writestate.writesEnabled
-          && (! writestate.closed)) {
+          && (!writestate.closed)) {
         
        writestate.writesOngoing = true;
        shouldFlush = true;
        
-        if(disableFutureWrites) {
+        if (disableFutureWrites) {
           writestate.writesEnabled = false;
        }
      }
    }
    
-    if(! shouldFlush) {
+    if (!shouldFlush) {
       LOG.debug("not flushing cache for region " + this.regionInfo.regionName);
      return null;
      
@@ -731,8 +731,8 @@ public class HRegion implements HConstants {
     
    HMemcache.Snapshot retval = memcache.snapshotMemcacheForLog(log);
    TreeMap<HStoreKey, BytesWritable> memcacheSnapshot = retval.memcacheSnapshot;
-    if(memcacheSnapshot == null) {
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+    if (memcacheSnapshot == null) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
         HStore hstore = it.next();
        Vector<HStoreFile> hstoreFiles = hstore.getAllMapFiles();
        allHStoreFiles.addAll(0, hstoreFiles);
@@ -746,7 +746,7 @@ public class HRegion implements HConstants {
     
    LOG.debug("flushing memcache to HStores");
    
-    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
       HStore hstore = it.next();
      Vector<HStoreFile> hstoreFiles 
        = hstore.flushCache(memcacheSnapshot, logCacheFlushId);
@@ -762,7 +762,7 @@ public class HRegion implements HConstants {
     LOG.debug("writing flush cache complete to log");
    
    log.completeCacheFlush(this.regionInfo.regionName,
-        regionInfo.tableDesc.getName(), logCacheFlushId);
+                           regionInfo.tableDesc.getName(), logCacheFlushId);
 
    // C. Delete the now-irrelevant memcache snapshot; its contents have been 
    //    dumped to disk-based HStores.
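Beyond spacing, these hunks pass through the cache-flush sequence whose ordering the comments spell out: snapshot the memcache, dump the snapshot to each disk-based HStore, write a flush-complete marker to the log, and only then discard the snapshot. A rough sketch of that ordering (Log and Store here are hypothetical stand-ins, not the HBase interfaces):

import java.util.Map;

class FlushSketch {
  interface Log { void completeCacheFlush(long flushId); }
  interface Store { void flushCache(Map<String, byte[]> snapshot, long flushId); }

  static void flush(Map<String, byte[]> snapshot, Iterable<Store> stores,
                    Log log, long flushId) {
    // Dump the snapshot to every disk-based store.
    for (Store store : stores) {
      store.flushCache(snapshot, flushId);
    }
    // Record completion so log replay can skip these edits.
    log.completeCacheFlush(flushId);
    // Only now is the snapshot redundant and safe to discard.
    snapshot.clear();
  }
}
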
@@ -784,7 +784,7 @@ public class HRegion implements HConstants {
   /** Fetch a single data item. */
  public byte[] get(Text row, Text column) throws IOException {
    byte results[][] = get(row, column, Long.MAX_VALUE, 1);
-    if(results == null) {
+    if (results == null) {
       return null;
      
    } else {
@@ -799,9 +799,9 @@ public class HRegion implements HConstants {
 
  /** Fetch multiple versions of a single data item, with timestamp. */
  public byte[][] get(Text row, Text column, long timestamp, int numVersions) 
-      throws IOException {
+    throws IOException {
     
-    if(writestate.closed) {
+    if (writestate.closed) {
       throw new IOException("HRegion is closed.");
    }
 
@@ -830,7 +830,7 @@ public class HRegion implements HConstants {
     // Check the memcache
 
    byte[][] result = memcache.get(key, numVersions);
-    if(result != null) {
+    if (result != null) {
       return result;
    }
 
@@ -838,7 +838,7 @@ public class HRegion implements HConstants {
 
    Text colFamily = HStoreKey.extractFamily(key.getColumn());
    HStore targetStore = stores.get(colFamily);
-    if(targetStore == null) {
+    if (targetStore == null) {
       return null;
    }
    
@@ -859,7 +859,7 @@ public class HRegion implements HConstants {
     HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
 
    TreeMap<Text, byte[]> memResult = memcache.getFull(key);
-    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
      HStore targetStore = stores.get(colFamily);
      targetStore.getFull(key, memResult);
@@ -879,7 +879,7 @@ public class HRegion implements HConstants {
 
    HStore storelist[] = new HStore[families.size()];
    int i = 0;
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
       Text family = it.next();
      storelist[i++] = stores.get(family);
    }
@@ -918,16 +918,16 @@ public class HRegion implements HConstants {
    * method.
   */
  public void put(long lockid, Text targetCol, byte[] val) throws IOException {
-    if(val.length == HStoreKey.DELETE_BYTES.length) {
+    if (val.length == HStoreKey.DELETE_BYTES.length) {
       boolean matches = true;
      for(int i = 0; i < val.length; i++) {
-        if(val[i] != HStoreKey.DELETE_BYTES[i]) {
+        if (val[i] != HStoreKey.DELETE_BYTES[i]) {
           matches = false;
          break;
        }
      }
      
-      if(matches) {
+      if (matches) {
         throw new IOException("Cannot insert value: " + val);
      }
    }
@@ -951,7 +951,7 @@ public class HRegion implements HConstants {
    */
  void localput(long lockid, Text targetCol, byte[] val) throws IOException {
    Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
    }
 
@@ -964,13 +964,13 @@ public class HRegion implements HConstants {
       // This check makes sure that another thread from the client
      // hasn't aborted/committed the write-operation.
 
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new IOException("Locking error: put operation on lock " + lockid 
-            + " unexpected aborted by another thread");
+                              + " unexpected aborted by another thread");
       }
      
      TreeMap<Text, byte[]> targets = targetColumns.get(lockid);
-      if(targets == null) {
+      if (targets == null) {
         targets = new TreeMap<Text, byte[]>();
        targetColumns.put(lockid, targets);
      }
@@ -985,7 +985,7 @@ public class HRegion implements HConstants {
    */
  public void abort(long lockid) throws IOException {
    Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
    }
    
@@ -998,9 +998,9 @@ public class HRegion implements HConstants {
       // This check makes sure another thread from the client
      // hasn't aborted/committed the write-operation.
      
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new IOException("Locking error: abort() operation on lock " 
-            + lockid + " unexpected aborted by another thread");
+                              + lockid + " unexpected aborted by another thread");
       }
      
      targetColumns.remove(lockid);
@@ -1021,7 +1021,7 @@ public class HRegion implements HConstants {
     // that repeated executions won't screw this up.
    
    Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
    }
    
@@ -1035,7 +1035,7 @@ public class HRegion implements HConstants {
 
      long commitTimestamp = System.currentTimeMillis();
      log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row, 
-          targetColumns.get(lockid), commitTimestamp);
+                 targetColumns.get(lockid), commitTimestamp);
       
      memcache.add(row, targetColumns.get(lockid), commitTimestamp);
 
@@ -1054,25 +1054,25 @@ public class HRegion implements HConstants {
 
  /** Make sure this is a valid row for the HRegion */
  void checkRow(Text row) throws IOException {
-    if(((regionInfo.startKey.getLength() == 0)
-        || (regionInfo.startKey.compareTo(row) <= 0))
+    if (((regionInfo.startKey.getLength() == 0)
+         || (regionInfo.startKey.compareTo(row) <= 0))
         && ((regionInfo.endKey.getLength() == 0)
            || (regionInfo.endKey.compareTo(row) > 0))) {
      // all's well
      
    } else {
      throw new IOException("Requested row out of range for HRegion "
-          + regionInfo.regionName + ", startKey='" + regionInfo.startKey
-          + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
+                            + regionInfo.regionName + ", startKey='" + regionInfo.startKey
+                            + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
     }
  }
 
  /** Make sure this is a valid column for the current table */
  void checkFamily(Text family) throws IOException {
-    if(! regionInfo.tableDesc.hasFamily(family)) {
+    if (!regionInfo.tableDesc.hasFamily(family)) {
       throw new IOException("Requested column family " + family 
-          + " does not exist in HRegion " + regionInfo.regionName
-          + " for table " + regionInfo.tableDesc.getName());
+                            + " does not exist in HRegion " + regionInfo.regionName
+                            + " for table " + regionInfo.tableDesc.getName());
     }
  }
 
@@ -1150,7 +1150,7 @@ public class HRegion implements HConstants {
         keys[i] = new HStoreKey();
        resultSets[i] = new TreeMap<Text, byte[]>();
 
-        if(! scanners[i].next(keys[i], resultSets[i])) {
+        if (!scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);
        }
      }
@@ -1167,7 +1167,7 @@ public class HRegion implements HConstants {
       Text chosenRow = null;
      long chosenTimestamp = -1;
      for(int i = 0; i < keys.length; i++) {
-        if(scanners[i] != null
+        if (scanners[i] != null
             && (chosenRow == null
            && (chosenRow == null
                || (keys[i].getRow().compareTo(chosenRow) < 0)
                || ((keys[i].getRow().compareTo(chosenRow) == 0)
@@ -1181,21 +1181,21 @@ public class HRegion implements HConstants {
       // Store the key and results for each sub-scanner. Merge them as appropriate.
      
      boolean insertedItem = false;
-      if(chosenTimestamp > 0) {
+      if (chosenTimestamp > 0) {
         key.setRow(chosenRow);
        key.setVersion(chosenTimestamp);
        key.setColumn(new Text(""));
 
        for(int i = 0; i < scanners.length; i++) {        
          while((scanners[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) == 0)
-              && (keys[i].getTimestamp() == chosenTimestamp)) {
+                && (keys[i].getRow().compareTo(chosenRow) == 0)
+                && (keys[i].getTimestamp() == chosenTimestamp)) {
             
            results.putAll(resultSets[i]);
            insertedItem = true;
 
            resultSets[i].clear();
-            if(! scanners[i].next(keys[i], resultSets[i])) {
+            if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
            }
          }
@@ -1204,10 +1204,10 @@ public class HRegion implements HConstants {
           // row label, then its timestamp is bad.  We need to advance it.
 
          while((scanners[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
+                && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
             
            resultSets[i].clear();
-            if(! scanners[i].next(keys[i], resultSets[i])) {
+            if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
            }
          }
@@ -1231,7 +1231,7 @@ public class HRegion implements HConstants {
     /** All done with the scanner. */
    public void close() throws IOException {
      for(int i = 0; i < scanners.length; i++) {
-        if(scanners[i] != null) {
+        if (scanners[i] != null) {
           closeScanner(i);
        }
      }

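The dominant reindentation in the HRegion hunks concerns wrapped call arguments: a continuation line is aligned under the first argument of the call rather than given a fixed two-space hang. A one-method sketch of that convention (hypothetical names, not code from the commit):

class WrapSketch {
  // The second argument line sits directly under the first argument.
  static String describe(String regionName, String colFamily, long fileId) {
    return String.format("region=%s family=%s id=%d",
                         regionName, colFamily, fileId);
  }
}
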
+ 3 - 3
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java

@@ -42,19 +42,19 @@ public class HRegionInfo implements Writable {
     
    this.regionId = regionId;
    
-    if(tableDesc == null) {
+    if (tableDesc == null) {
       throw new IllegalArgumentException("tableDesc cannot be null");
    }
    
    this.tableDesc = tableDesc;
    
    this.startKey = new Text();
-    if(startKey != null) {
+    if (startKey != null) {
       this.startKey.set(startKey);
    }
    
    this.endKey = new Text();
-    if(endKey != null) {
+    if (endKey != null) {
       this.endKey.set(endKey);
    }
    

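Style aside, the HRegionInfo hunk passes through a defensive constructor pattern: reject a null table descriptor, and copy the optional start/end keys into freshly allocated Text holders instead of aliasing the caller's objects. A rough equivalent with plain builders (a hypothetical class, not the HBase type):

class KeyRangeSketch {
  private final StringBuilder startKey = new StringBuilder();
  private final StringBuilder endKey = new StringBuilder();

  KeyRangeSketch(Object tableDesc, String start, String end) {
    if (tableDesc == null) {
      throw new IllegalArgumentException("tableDesc cannot be null");
    }
    // Copy so later mutation on the caller's side cannot change this range.
    if (start != null) {
      startKey.append(start);
    }
    if (end != null) {
      endKey.append(end);
    }
  }
}
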
+ 34 - 34
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java

@@ -61,7 +61,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     }
    
    public void run() {
-      while(! stopRequested) {
+      while(!stopRequested) {
         long startTime = System.currentTimeMillis();
 
        // Grab a list of regions to check
@@ -78,12 +78,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         // Check to see if they need splitting
 
        Vector<SplitRegion> toSplit = new Vector<SplitRegion>();
-        for(Iterator<HRegion> it = checkSplit.iterator(); it.hasNext(); ) {
+        for(Iterator<HRegion> it = checkSplit.iterator(); it.hasNext();) {
           HRegion cur = it.next();
          Text midKey = new Text();
          
          try {
-            if(cur.needsSplit(midKey)) {
+            if (cur.needsSplit(midKey)) {
               toSplit.add(new SplitRegion(cur, midKey));
            }
            
@@ -92,7 +92,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
           }
        }
 
-        for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext(); ) {
+        for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext();) {
           SplitRegion r = it.next();
          
          locking.obtainWriteLock();
@@ -161,7 +161,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private Thread cacheFlusherThread;
  private class Flusher implements Runnable {
    public void run() {
-      while(! stopRequested) {
+      while(!stopRequested) {
         long startTime = System.currentTimeMillis();
 
        // Grab a list of items to flush
@@ -177,7 +177,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
        // Flush them, if necessary
 
-        for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext(); ) {
+        for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext();) {
           HRegion cur = it.next();
          
          try {
@@ -212,12 +212,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private Thread logRollerThread;
  private class LogRoller implements Runnable {
    public void run() {
-      while(! stopRequested) {
+      while(!stopRequested) {
 
        // If the number of log entries is high enough, roll the log.  This is a
        // very fast operation, but should not be done too frequently.
 
-        if(log.getNumEntries() > maxLogEntries) {
+        if (log.getNumEntries() > maxLogEntries) {
           try {
           try {
             log.rollWriter();
             log.rollWriter();
             
             
@@ -334,7 +334,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * processing to cease.
    * processing to cease.
    */
    */
   public void stop() throws IOException {
   public void stop() throws IOException {
-    if(! stopRequested) {
+    if (!stopRequested) {
       stopRequested = true;
       stopRequested = true;
  
  
       closeAllRegions();
       closeAllRegions();
@@ -375,7 +375,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * load/unload instructions.
    * load/unload instructions.
    */
    */
   public void run() {
   public void run() {
-    while(! stopRequested) {
+    while(!stopRequested) {
       HServerInfo info = new HServerInfo(address, rand.nextLong());
       HServerInfo info = new HServerInfo(address, rand.nextLong());
       long lastMsg = 0;
       long lastMsg = 0;
       long waitTime;
       long waitTime;
@@ -398,8 +398,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       
       
       // Now ask the master what it wants us to do and tell it what we have done.
       // Now ask the master what it wants us to do and tell it what we have done.
       
       
-      while(! stopRequested) {
-        if((System.currentTimeMillis() - lastMsg) >= msgInterval) {
+      while(!stopRequested) {
+        if ((System.currentTimeMillis() - lastMsg) >= msgInterval) {
 
 
           HMsg outboundArray[] = null;
           HMsg outboundArray[] = null;
           synchronized(outboundMsgs) {
           synchronized(outboundMsgs) {
@@ -413,7 +413,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
 
             // Process the HMaster's instruction stream
             // Process the HMaster's instruction stream
 
 
-            if(! processMessages(msgs)) {
+            if (!processMessages(msgs)) {
               break;
               break;
             }
             }
 
 
@@ -529,10 +529,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
     try {
       HRegion region = regions.remove(info.regionName);
       HRegion region = regions.remove(info.regionName);
       
       
-      if(region != null) {
+      if (region != null) {
         region.close();
         region.close();
         
         
-        if(reportWhenCompleted) {
+        if (reportWhenCompleted) {
           reportClose(region);
           reportClose(region);
         }
         }
       }
       }
@@ -548,7 +548,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
     try {
       HRegion region = regions.remove(info.regionName);
       HRegion region = regions.remove(info.regionName);
   
   
-      if(region != null) {
+      if (region != null) {
         region.closeAndDelete();
         region.closeAndDelete();
       }
       }
   
   
@@ -561,7 +561,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private void closeAllRegions() throws IOException {
   private void closeAllRegions() throws IOException {
     locking.obtainWriteLock();
     locking.obtainWriteLock();
     try {
     try {
-      for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext();) {
         HRegion region = it.next();
         HRegion region = it.next();
         region.close();
         region.close();
       }
       }
@@ -606,7 +606,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Obtain a table descriptor for the given region */
   /** Obtain a table descriptor for the given region */
   public HRegionInfo getRegionInfo(Text regionName) {
   public HRegionInfo getRegionInfo(Text regionName) {
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       return null;
       return null;
     }
     }
     return region.getRegionInfo();
     return region.getRegionInfo();
@@ -617,7 +617,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                                        Text firstRow) throws IOException {
                                        Text firstRow) throws IOException {
 
 
     HRegion r = getRegion(regionName);
     HRegion r = getRegion(regionName);
-    if(r == null) {
+    if (r == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     return r.getScanner(cols, firstRow);
     return r.getScanner(cols, firstRow);
@@ -626,12 +626,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Get the indicated row/column */
   /** Get the indicated row/column */
   public BytesWritable get(Text regionName, Text row, Text column) throws IOException {
   public BytesWritable get(Text regionName, Text row, Text column) throws IOException {
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
     byte results[] = region.get(row, column);
     byte results[] = region.get(row, column);
-    if(results != null) {
+    if (results != null) {
       return new BytesWritable(results);
       return new BytesWritable(results);
     }
     }
     return null;
     return null;
@@ -642,15 +642,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                              int numVersions) throws IOException {
                              int numVersions) throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
     byte results[][] = region.get(row, column, numVersions);
     byte results[][] = region.get(row, column, numVersions);
-    if(results != null) {
+    if (results != null) {
       BytesWritable realResults[] = new BytesWritable[results.length];
       BytesWritable realResults[] = new BytesWritable[results.length];
       for(int i = 0; i < realResults.length; i++) {
       for(int i = 0; i < realResults.length; i++) {
-        if(results[i] != null) {
+        if (results[i] != null) {
           realResults[i] = new BytesWritable(results[i]);
           realResults[i] = new BytesWritable(results[i]);
         }
         }
       }
       }
@@ -664,15 +664,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                              long timestamp, int numVersions) throws IOException {
                              long timestamp, int numVersions) throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
     byte results[][] = region.get(row, column, timestamp, numVersions);
     byte results[][] = region.get(row, column, timestamp, numVersions);
-    if(results != null) {
+    if (results != null) {
       BytesWritable realResults[] = new BytesWritable[results.length];
       BytesWritable realResults[] = new BytesWritable[results.length];
       for(int i = 0; i < realResults.length; i++) {
       for(int i = 0; i < realResults.length; i++) {
-        if(results[i] != null) {
+        if (results[i] != null) {
           realResults[i] = new BytesWritable(results[i]);
           realResults[i] = new BytesWritable(results[i]);
         }
         }
       }
       }
@@ -684,14 +684,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Get all the columns (along with their names) for a given row. */
   /** Get all the columns (along with their names) for a given row. */
   public LabelledData[] getRow(Text regionName, Text row) throws IOException {
   public LabelledData[] getRow(Text regionName, Text row) throws IOException {
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
     TreeMap<Text, byte[]> map = region.getFull(row);
     TreeMap<Text, byte[]> map = region.getFull(row);
     LabelledData result[] = new LabelledData[map.size()];
     LabelledData result[] = new LabelledData[map.size()];
     int counter = 0;
     int counter = 0;
-    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext();) {
       Text colname = it.next();
       Text colname = it.next();
       byte val[] = map.get(colname);
       byte val[] = map.get(colname);
       result[counter++] = new LabelledData(colname, val);
       result[counter++] = new LabelledData(colname, val);
@@ -726,7 +726,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
@@ -743,7 +743,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                   BytesWritable val) throws IOException {
                   BytesWritable val) throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
@@ -758,7 +758,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
@@ -773,7 +773,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     
@@ -788,7 +788,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     throws IOException {
     
     
     HRegion region = getRegion(regionName);
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
       throw new IOException("Not serving region " + regionName);
     }
     }
     
     

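Every RPC method in the HRegionServer hunks above repeats the same guard: look up the region, and throw "Not serving region" if this server does not host it. A minimal sketch of how that guard could be pulled into one helper (a hypothetical refactoring, not part of this commit; HRegion, Text, and getRegion are the class's existing members):

    // Hypothetical helper inside HRegionServer: centralizes the repeated
    // "Not serving region" guard used by get/getRow/getScanner/put above.
    private HRegion getRegionOrThrow(Text regionName) throws IOException {
      HRegion region = getRegion(regionName);
      if (region == null) {
        throw new IOException("Not serving region " + regionName);
      }
      return region;
    }
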
+ 3 - 3
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java

@@ -35,7 +35,7 @@ public class HServerAddress implements Writable {
  
  public HServerAddress(String hostAndPort) {
    int colonIndex = hostAndPort.indexOf(':');
-    if(colonIndex < 0) {
+    if (colonIndex < 0) {
      throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
    }
    String host = hostAndPort.substring(0, colonIndex);
@@ -80,7 +80,7 @@ public class HServerAddress implements Writable {
    String bindAddress = in.readUTF();
    int port = in.readInt();
    
-    if(bindAddress == null || bindAddress.length() == 0) {
+    if (bindAddress == null || bindAddress.length() == 0) {
      address = null;
      stringValue = null;
      
@@ -91,7 +91,7 @@ public class HServerAddress implements Writable {
  }

  public void write(DataOutput out) throws IOException {
-    if(address == null) {
+    if (address == null) {
      out.writeUTF("");
      out.writeInt(0);
      

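The first HServerAddress hunk shows the host:port parse: split on the first colon, and reject strings without one. A standalone sketch of the same logic, with the port conversion (which falls outside the hunk) filled in as an assumption:

    // Self-contained sketch; the hostAndPort value is hypothetical.
    public class HostPortParseExample {
      public static void main(String[] args) {
        String hostAndPort = "regionserver.example.com:60020";
        int colonIndex = hostAndPort.indexOf(':');
        if (colonIndex < 0) {
          throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
        }
        String host = hostAndPort.substring(0, colonIndex);
        int port = Integer.parseInt(hostAndPort.substring(colonIndex + 1));  // assumed conversion
        System.out.println(host + " / " + port);
      }
    }
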
+ 59 - 59
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java

@@ -110,7 +110,7 @@ public class HStore {

    this.compactdir = new Path(dir, COMPACTION_DIR);
    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
-    if(fs.exists(curCompactStore)) {
+    if (fs.exists(curCompactStore)) {
      processReadyCompaction();
      fs.delete(curCompactStore);
    }
@@ -123,7 +123,7 @@ public class HStore {
    Vector<HStoreFile> hstoreFiles 
      = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs);
    
-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext();) {
      HStoreFile hsf = it.next();
      mapFiles.put(hsf.loadInfo(fs), hsf);
    }
@@ -138,11 +138,11 @@ public class HStore {
    // contain any updates also contained in the log.

    long maxSeqID = -1;
-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext();) {
      HStoreFile hsf = it.next();
      long seqid = hsf.loadInfo(fs);
-      if(seqid > 0) {
-        if(seqid > maxSeqID) {
+      if (seqid > 0) {
+        if (seqid > maxSeqID) {
          maxSeqID = seqid;
        }
      }
@@ -157,7 +157,7 @@ public class HStore {

    LOG.debug("reading reconstructionLog");
    
-    if(reconstructionLog != null && fs.exists(reconstructionLog)) {
+    if (reconstructionLog != null && fs.exists(reconstructionLog)) {
      long maxSeqIdInLog = -1;
      TreeMap<HStoreKey, BytesWritable> reconstructedCache 
        = new TreeMap<HStoreKey, BytesWritable>();
@@ -170,7 +170,7 @@ public class HStore {
        HLogEdit val = new HLogEdit();
        while(login.next(key, val)) {
          maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
-          if(key.getLogSeqNum() <= maxSeqID) {
+          if (key.getLogSeqNum() <= maxSeqID) {
            continue;
          }
          reconstructedCache.put(new HStoreKey(key.getRow(), val.getColumn(), 
@@ -181,7 +181,7 @@ public class HStore {
        login.close();
      }

-      if(reconstructedCache.size() > 0) {
+      if (reconstructedCache.size() > 0) {
        
        // We create a "virtual flush" at maxSeqIdInLog+1.
        
@@ -195,7 +195,7 @@ public class HStore {
    // should be "timeless"; that is, it should not have an associated seq-ID, 
    // because all log messages have been reflected in the TreeMaps at this point.
    
-    if(mapFiles.size() >= 1) {
+    if (mapFiles.size() >= 1) {
      compactHelper(true);
    }

@@ -204,7 +204,7 @@ public class HStore {

    LOG.debug("starting map readers");
    
-    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext();) {
      Long key = it.next().longValue();
      HStoreFile hsf = mapFiles.get(key);

@@ -222,7 +222,7 @@ public class HStore {
    LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
    
    try {
-      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
+      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext();) {
        MapFile.Reader map = it.next();
        map.close();
      }
@@ -273,9 +273,9 @@ public class HStore {
                                              HStoreKey.class, BytesWritable.class);
      
      try {
-        for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext();) {
          HStoreKey curkey = it.next();
-          if(this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
            BytesWritable val = inputCache.get(curkey);
            out.append(curkey, val);
          }
@@ -294,7 +294,7 @@ public class HStore {

      // C. Finally, make the new MapFile available.

-      if(addToAvailableMaps) {
+      if (addToAvailableMaps) {
        locking.obtainWriteLock();
        
        try {
@@ -312,7 +312,7 @@ public class HStore {

  public Vector<HStoreFile> getAllMapFiles() {
    Vector<HStoreFile> flushedFiles = new Vector<HStoreFile>();
-    for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
      HStoreFile hsf = it.next();
      flushedFiles.add(hsf);
    }
@@ -366,11 +366,11 @@ public class HStore {
        // Compute the max-sequenceID seen in any of the to-be-compacted TreeMaps

        long maxSeenSeqID = -1;
-        for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+        for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
          HStoreFile hsf = it.next();
          long seqid = hsf.loadInfo(fs);
-          if(seqid > 0) {
-            if(seqid > maxSeenSeqID) {
+          if (seqid > 0) {
+            if (seqid > maxSeenSeqID) {
              maxSeenSeqID = seqid;
            }
          }
@@ -380,11 +380,11 @@ public class HStore {
        HStoreFile compactedOutputFile 
          = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
        
-        if(toCompactFiles.size() == 1) {
+        if (toCompactFiles.size() == 1) {
          LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily);
          
          HStoreFile hsf = toCompactFiles.elementAt(0);
-          if(hsf.loadInfo(fs) == -1) {
+          if (hsf.loadInfo(fs) == -1) {
            return;
          }
        }
@@ -414,7 +414,7 @@ public class HStore {
          BytesWritable[] vals = new BytesWritable[toCompactFiles.size()];
          boolean[] done = new boolean[toCompactFiles.size()];
          int pos = 0;
-          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
            HStoreFile hsf = it.next();
            readers[pos] = new MapFile.Reader(fs, hsf.getMapFilePath().toString(), conf);
            keys[pos] = new HStoreKey();
@@ -431,8 +431,8 @@ public class HStore {
          int numDone = 0;
          for(int i = 0; i < readers.length; i++) {
            readers[i].reset();
-            done[i] = ! readers[i].next(keys[i], vals[i]);
-            if(done[i]) {
+            done[i] = !readers[i].next(keys[i], vals[i]);
+            if (done[i]) {
              numDone++;
            }
          }
@@ -446,15 +446,15 @@ public class HStore {

            int smallestKey = -1;
            for(int i = 0; i < readers.length; i++) {
-              if(done[i]) {
+              if (done[i]) {
                continue;
              }
              
-              if(smallestKey < 0) {
+              if (smallestKey < 0) {
                smallestKey = i;
              
              } else {
-                if(keys[i].compareTo(keys[smallestKey]) < 0) {
+                if (keys[i].compareTo(keys[smallestKey]) < 0) {
                  smallestKey = i;
                }
              }
@@ -463,8 +463,8 @@ public class HStore {
            // Reflect the current key/val in the output

            HStoreKey sk = keys[smallestKey];
-            if(lastRow.equals(sk.getRow())
-               && lastColumn.equals(sk.getColumn())) {
+            if (lastRow.equals(sk.getRow())
+                && lastColumn.equals(sk.getColumn())) {
              
              timesSeen++;
              
@@ -472,13 +472,13 @@ public class HStore {
              timesSeen = 1;
            }
            
-            if(timesSeen <= maxVersions) {
+            if (timesSeen <= maxVersions) {

              // Keep old versions until we have maxVersions worth.
              // Then just skip them.

-              if(sk.getRow().getLength() != 0
-                 && sk.getColumn().getLength() != 0) {
+              if (sk.getRow().getLength() != 0
+                  && sk.getColumn().getLength() != 0) {
                
                // Only write out objects which have a non-zero length key and value

@@ -499,7 +499,7 @@ public class HStore {
            // Advance the smallest key.  If that reader's all finished, then 
            // mark it as done.

-            if(! readers[smallestKey].next(keys[smallestKey], vals[smallestKey])) {
+            if (!readers[smallestKey].next(keys[smallestKey], vals[smallestKey])) {
              done[smallestKey] = true;
              readers[smallestKey].close();
              numDone++;
@@ -516,7 +516,7 @@ public class HStore {

        // Now, write out an HSTORE_LOGINFOFILE for the brand-new TreeMap.

-        if((! deleteSequenceInfo) && maxSeenSeqID >= 0) {
+        if ((!deleteSequenceInfo) && maxSeenSeqID >= 0) {
          compactedOutputFile.writeInfo(fs, maxSeenSeqID);
          
        } else {
@@ -529,7 +529,7 @@ public class HStore {
        DataOutputStream out = new DataOutputStream(fs.create(filesToReplace));
        try {
          out.writeInt(toCompactFiles.size());
-          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
            HStoreFile hsf = it.next();
            hsf.write(out);
          }
@@ -587,7 +587,7 @@ public class HStore {
    Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
    try {
      Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
-      if(! fs.exists(doneFile)) {
+      if (!fs.exists(doneFile)) {
        
        // The last execution didn't finish the compaction, so there's nothing 
        // we can do.  We'll just have to redo it. Abandon it and return.
@@ -622,18 +622,18 @@ public class HStore {
      // 3. Unload all the replaced MapFiles.
      
      Iterator<HStoreFile> it2 = mapFiles.values().iterator();
-      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
+      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext();) {
        MapFile.Reader curReader = it.next();
        HStoreFile curMapFile = it2.next();
-        if(toCompactFiles.contains(curMapFile)) {
+        if (toCompactFiles.contains(curMapFile)) {
          curReader.close();
          it.remove();
        }
      }
      
-      for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
        HStoreFile curMapFile = it.next();
-        if(toCompactFiles.contains(curMapFile)) {
+        if (toCompactFiles.contains(curMapFile)) {
          it.remove();
        }
      }
@@ -645,7 +645,7 @@ public class HStore {

      // 4. Delete all the old files, no longer needed
      
-      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
        HStoreFile hsf = it.next();
        fs.delete(hsf.getMapFilePath());
        fs.delete(hsf.getInfoFilePath());
@@ -720,12 +720,12 @@ public class HStore {
          
          do {
            Text readcol = readkey.getColumn();
-            if(results.get(readcol) == null
-               && key.matchesWithoutColumn(readkey)) {
+            if (results.get(readcol) == null
+                && key.matchesWithoutColumn(readkey)) {
              results.put(new Text(readcol), readval.get());
              readval = new BytesWritable();
              
-            } else if(key.getRow().compareTo(readkey.getRow()) > 0) {
+            } else if (key.getRow().compareTo(readkey.getRow()) > 0) {
              break;
            }
            
@@ -745,7 +745,7 @@ public class HStore {
   * If 'numVersions' is negative, the method returns all available versions.
   */
  public byte[][] get(HStoreKey key, int numVersions) throws IOException {
-    if(numVersions == 0) {
+    if (numVersions == 0) {
      throw new IllegalArgumentException("Must request at least one value.");
    }
    
@@ -763,12 +763,12 @@ public class HStore {
          map.reset();
          HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
          
-          if(readkey.matchesRowCol(key)) {
+          if (readkey.matchesRowCol(key)) {
            results.add(readval.get());
            readval = new BytesWritable();

            while(map.next(readkey, readval) && readkey.matchesRowCol(key)) {
-              if(numVersions > 0 && (results.size() >= numVersions)) {
+              if (numVersions > 0 && (results.size() >= numVersions)) {
                break;
                
              } else {
@@ -778,12 +778,12 @@ public class HStore {
            }
          }
        }
-        if(results.size() >= numVersions) {
+        if (results.size() >= numVersions) {
          break;
        }
      }

-      if(results.size() == 0) {
+      if (results.size() == 0) {
        return null;
        
      } else {
@@ -809,13 +809,13 @@ public class HStore {
    // Iterate through all the MapFiles
    
    for(Iterator<Map.Entry<Long, HStoreFile>> it = mapFiles.entrySet().iterator();
-        it.hasNext(); ) {
+        it.hasNext();) {
      
      Map.Entry<Long, HStoreFile> e = it.next();
      HStoreFile curHSF = e.getValue();
      long size = fs.getLength(new Path(curHSF.getMapFilePath(), MapFile.DATA_FILE_NAME));
      
-      if(size > maxSize) {              // This is the largest one so far
+      if (size > maxSize) {              // This is the largest one so far
        maxSize = size;
        mapIndex = e.getKey();
      }
@@ -871,7 +871,7 @@ public class HStore {
      try {
        this.readers = new MapFile.Reader[mapFiles.size()];
        int i = 0;
-        for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
+        for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
          HStoreFile curHSF = it.next();
          readers[i++] = new MapFile.Reader(fs, curHSF.getMapFilePath().toString(), conf);
        }
@@ -885,14 +885,14 @@ public class HStore {
          keys[i] = new HStoreKey();
          vals[i] = new BytesWritable();

-          if(firstRow.getLength() != 0) {
-            if(findFirstRow(i, firstRow)) {
+          if (firstRow.getLength() != 0) {
+            if (findFirstRow(i, firstRow)) {
              continue;
            }
          }
          
          while(getNext(i)) {
-            if(columnMatch(i)) {
+            if (columnMatch(i)) {
              break;
            }
          }
@@ -915,7 +915,7 @@ public class HStore {
      HStoreKey firstKey
        = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), vals[i]);
      
-      if(firstKey == null) {
+      if (firstKey == null) {
        
        // Didn't find it. Close the scanner and return TRUE
        
@@ -935,7 +935,7 @@ public class HStore {
     * @return - true if there is more data available
     */
    boolean getNext(int i) throws IOException {
-      if(! readers[i].next(keys[i], vals[i])) {
+      if (!readers[i].next(keys[i], vals[i])) {
        closeSubScanner(i);
        return false;
      }
@@ -945,7 +945,7 @@ public class HStore {
    /** Close down the indicated reader. */
    void closeSubScanner(int i) throws IOException {
      try {
-        if(readers[i] != null) {
+        if (readers[i] != null) {
          readers[i].close();
        }
        
@@ -958,10 +958,10 @@ public class HStore {

    /** Shut it down! */
    public void close() throws IOException {
-      if(! scannerClosed) {
+      if (!scannerClosed) {
        try {
          for(int i = 0; i < readers.length; i++) {
-            if(readers[i] != null) {
+            if (readers[i] != null) {
              readers[i].close();
            }
          }

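The compaction loop touched by the @@ -414 through @@ -499 hunks is a k-way merge: each MapFile reader contributes one current key, the loop repeatedly emits the smallest of those keys, advances that reader, and marks it done when it is exhausted (the done[]/numDone bookkeeping). A self-contained sketch of the same control flow, with plain sorted lists standing in for MapFile readers (all names here are illustrative):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    // K-way merge sketch mirroring the compaction loop's done[]/smallestKey logic.
    public class KWayMergeExample {
      public static List<String> merge(List<List<String>> inputs) {
        int n = inputs.size();
        int[] pos = new int[n];            // next unread index per input
        boolean[] done = new boolean[n];   // mirrors the done[] array above
        int numDone = 0;
        List<String> out = new ArrayList<String>();
        for (int i = 0; i < n; i++) {
          if (inputs.get(i).isEmpty()) { done[i] = true; numDone++; }
        }
        while (numDone < n) {
          int smallest = -1;               // mirrors smallestKey
          for (int i = 0; i < n; i++) {
            if (done[i]) continue;
            if (smallest < 0 || inputs.get(i).get(pos[i])
                .compareTo(inputs.get(smallest).get(pos[smallest])) < 0) {
              smallest = i;
            }
          }
          out.add(inputs.get(smallest).get(pos[smallest]));
          if (++pos[smallest] >= inputs.get(smallest).size()) {
            done[smallest] = true;         // this "reader" is exhausted
            numDone++;
          }
        }
        return out;
      }

      public static void main(String[] args) {
        System.out.println(merge(Arrays.asList(
            Arrays.asList("a", "d"), Arrays.asList("b", "c", "e"))));
        // prints [a, b, c, d, e]
      }
    }
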
+ 14 - 14
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java

@@ -158,13 +158,13 @@ public class HStoreFile implements HConstants, WritableComparable {
    for(int i = 0; i < datfiles.length; i++) {
      String name = datfiles[i].getName();
      
-      if(name.startsWith(HSTORE_DATFILE_PREFIX)) {
+      if (name.startsWith(HSTORE_DATFILE_PREFIX)) {
        Long fileId = Long.parseLong(name.substring(HSTORE_DATFILE_PREFIX.length()));
        HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
        Path mapfile = curfile.getMapFilePath();
        Path infofile = curfile.getInfoFilePath();
        
-        if(fs.exists(infofile)) {
+        if (fs.exists(infofile)) {
          results.add(curfile);
          
        } else {
@@ -178,12 +178,12 @@ public class HStoreFile implements HConstants, WritableComparable {
    for(int i = 0; i < infofiles.length; i++) {
      String name = infofiles[i].getName();
      
-      if(name.startsWith(HSTORE_INFOFILE_PREFIX)) {
+      if (name.startsWith(HSTORE_INFOFILE_PREFIX)) {
        long fileId = Long.parseLong(name.substring(HSTORE_INFOFILE_PREFIX.length()));
        HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
        Path mapfile = curfile.getMapFilePath();
        
-        if(! fs.exists(mapfile)) {
+        if (!fs.exists(mapfile)) {
          fs.delete(curfile.getInfoFilePath());
        }
      }
@@ -220,7 +220,7 @@ public class HStoreFile implements HConstants, WritableComparable {
          while(in.next(readkey, readval)) {
            Text key = readkey.getRow();
            
-            if(key.compareTo(midKey) < 0) {
+            if (key.compareTo(midKey) < 0) {
              outA.append(readkey, readval);
              
            } else {
@@ -260,7 +260,7 @@ public class HStoreFile implements HConstants, WritableComparable {
                                            HStoreKey.class, BytesWritable.class);
    
    try {
-      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
+      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
        HStoreFile src = it.next();
        MapFile.Reader in = new MapFile.Reader(fs, src.getMapFilePath().toString(), conf);
        
@@ -283,11 +283,11 @@ public class HStoreFile implements HConstants, WritableComparable {
    // Build a unified InfoFile from the source InfoFiles.

    long unifiedSeqId = -1;
-    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
      HStoreFile hsf = it.next();
      long curSeqId = hsf.loadInfo(fs);
      
-      if(curSeqId > unifiedSeqId) {
+      if (curSeqId > unifiedSeqId) {
        unifiedSeqId = curSeqId;
      }
    }
@@ -301,7 +301,7 @@ public class HStoreFile implements HConstants, WritableComparable {
    
    try {
      byte flag = in.readByte();
-      if(flag == INFO_SEQ_NUM) {
+      if (flag == INFO_SEQ_NUM) {
        return in.readLong();
        
      } else {
@@ -352,17 +352,17 @@ public class HStoreFile implements HConstants, WritableComparable {
  public int compareTo(Object o) {
    HStoreFile other = (HStoreFile) o;
    int result = this.dir.compareTo(other.dir);    
-    if(result == 0) {
+    if (result == 0) {
      this.regionName.compareTo(other.regionName);
    }
-    if(result == 0) {
+    if (result == 0) {
      result = this.colFamily.compareTo(other.colFamily);
    }    
-    if(result == 0) {
-      if(this.fileId < other.fileId) {
+    if (result == 0) {
+      if (this.fileId < other.fileId) {
        result = -1;
        
-      } else if(this.fileId > other.fileId) {
+      } else if (this.fileId > other.fileId) {
        result = 1;
      }
    }

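One detail worth flagging in the last HStoreFile hunk: the line `this.regionName.compareTo(other.regionName);` computes a comparison but never assigns it to result, so the region name has no effect on the ordering. A sketch of the presumably intended chain (a hypothetical fix, not what this commit contains):

    // Presumably intended comparison chain; the committed code drops the
    // regionName comparison's result on the floor.
    public int compareTo(Object o) {
      HStoreFile other = (HStoreFile) o;
      int result = this.dir.compareTo(other.dir);
      if (result == 0) {
        result = this.regionName.compareTo(other.regionName);  // assign the result
      }
      if (result == 0) {
        result = this.colFamily.compareTo(other.colFamily);
      }
      if (result == 0) {
        if (this.fileId < other.fileId) {
          result = -1;
        } else if (this.fileId > other.fileId) {
          result = 1;
        }
      }
      return result;
    }
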
+ 9 - 9
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java

@@ -29,7 +29,7 @@ public class HStoreKey implements WritableComparable {
  public static Text extractFamily(Text col) throws IOException {
    String column = col.toString();
    int colpos = column.indexOf(":");
-    if(colpos < 0) {
+    if (colpos < 0) {
      throw new IllegalArgumentException("Illegal column name has no family indicator: " + column);
    }
    return new Text(column.substring(0, colpos));
@@ -94,8 +94,8 @@ public class HStoreKey implements WritableComparable {
  }
  
  public boolean matchesRowCol(HStoreKey other) {
-    if(this.row.compareTo(other.row) == 0 &&
-       this.column.compareTo(other.column) == 0) {
+    if (this.row.compareTo(other.row) == 0 &&
+        this.column.compareTo(other.column) == 0) {
      return true;
      
    } else {
@@ -104,8 +104,8 @@ public class HStoreKey implements WritableComparable {
  }
  
  public boolean matchesWithoutColumn(HStoreKey other) {
-    if((this.row.compareTo(other.row) == 0) &&
-       (this.timestamp >= other.getTimestamp())) {
+    if ((this.row.compareTo(other.row) == 0) &&
+        (this.timestamp >= other.getTimestamp())) {
      return true;
      
    } else {
@@ -124,14 +124,14 @@ public class HStoreKey implements WritableComparable {
  public int compareTo(Object o) {
    HStoreKey other = (HStoreKey) o;
    int result = this.row.compareTo(other.row);
-    if(result == 0) {
+    if (result == 0) {
      result = this.column.compareTo(other.column);
      
-      if(result == 0) {
-        if(this.timestamp < other.timestamp) {
+      if (result == 0) {
+        if (this.timestamp < other.timestamp) {
          result = 1;
          
-        } else if(this.timestamp > other.timestamp) {
+        } else if (this.timestamp > other.timestamp) {
          result = -1;
        }
      }

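Note that the timestamp branch in HStoreKey.compareTo is deliberately inverted: a larger (newer) timestamp compares as smaller, so the newest version of a cell sorts first and scans meet it before older versions. A minimal illustration of that ordering with a hypothetical stand-in class:

    // Newest-first ordering: larger timestamp => "smaller" key.
    public class TimestampOrderExample implements Comparable<TimestampOrderExample> {
      final long timestamp;

      TimestampOrderExample(long timestamp) { this.timestamp = timestamp; }

      public int compareTo(TimestampOrderExample other) {
        if (this.timestamp < other.timestamp) { return 1; }   // older sorts later
        if (this.timestamp > other.timestamp) { return -1; }  // newer sorts first
        return 0;
      }

      public static void main(String[] args) {
        TimestampOrderExample newer = new TimestampOrderExample(200L);
        TimestampOrderExample older = new TimestampOrderExample(100L);
        System.out.println(newer.compareTo(older));  // -1: newer comes first
      }
    }
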
+ 7 - 7
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java

@@ -54,7 +54,7 @@ public class HTableDescriptor implements WritableComparable {

  /** Do we contain a given column? */
  public boolean hasFamily(Text family) {
-    if(families.contains(family)) {
+    if (families.contains(family)) {
      return true;
      
    } else {
@@ -75,7 +75,7 @@ public class HTableDescriptor implements WritableComparable {
    name.write(out);
    out.writeInt(maxVersions);
    out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
      it.next().write(out);
    }
  }
@@ -99,21 +99,21 @@ public class HTableDescriptor implements WritableComparable {
  public int compareTo(Object o) {
    HTableDescriptor htd = (HTableDescriptor) o;
    int result = name.compareTo(htd.name);
-    if(result == 0) {
+    if (result == 0) {
      result = maxVersions - htd.maxVersions;
    }
    
-    if(result == 0) {
+    if (result == 0) {
      result = families.size() - htd.families.size();
    }
    
-    if(result == 0) {
+    if (result == 0) {
      Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+      for(Iterator<Text> it = families.iterator(); it.hasNext();) {
        Text family1 = it.next();
        Text family2 = it2.next();
        result = family1.compareTo(family2);
-        if(result != 0) {
+        if (result != 0) {
          return result;
        }
      }

+ 6 - 6
src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java

@@ -77,7 +77,7 @@ public class Leases {
      synchronized(sortedLeases) {
        Lease lease = new Lease(holderId, resourceId, listener);
        Text leaseId = lease.getLeaseId();
-        if(leases.get(leaseId) != null) {
+        if (leases.get(leaseId) != null) {
          throw new IOException("Impossible state for createLease(): Lease for holderId " + holderId + " and resourceId " + resourceId + " is still held.");
        }
        leases.put(leaseId, lease);
@@ -92,7 +92,7 @@ public class Leases {
      synchronized(sortedLeases) {
        Text leaseId = createLeaseId(holderId, resourceId);
        Lease lease = leases.get(leaseId);
-        if(lease == null) {
+        if (lease == null) {
          
          // It's possible that someone tries to renew the lease, but 
          // it just expired a moment ago.  So fail.
@@ -113,7 +113,7 @@ public class Leases {
      synchronized(sortedLeases) {
        Text leaseId = createLeaseId(holderId, resourceId);
        Lease lease = leases.get(leaseId);
-        if(lease == null) {
+        if (lease == null) {
          
          // It's possible that someone tries to renew the lease, but 
          // it just expired a moment ago.  So fail.
@@ -139,7 +139,7 @@ public class Leases {
            while((sortedLeases.size() > 0)
                  && ((top = sortedLeases.first()) != null)) {
              
-              if(top.shouldExpire()) {
+              if (top.shouldExpire()) {
                leases.remove(top.getLeaseId());
                sortedLeases.remove(top);

@@ -205,10 +205,10 @@ public class Leases {

    public int compareTo(Object o) {
      Lease other = (Lease) o;
-      if(this.lastUpdate < other.lastUpdate) {
+      if (this.lastUpdate < other.lastUpdate) {
        return -1;
        
-      } else if(this.lastUpdate > other.lastUpdate) {
+      } else if (this.lastUpdate > other.lastUpdate) {
        return 1;
        
      } else {

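Lease.compareTo orders leases by lastUpdate ascending, which is what lets the expiry loop above look only at sortedLeases.first(): the head of the sorted set is always the lease closest to expiry, so the sweep can stop at the first lease that is still fresh. A self-contained sketch of that pattern (the Lease stand-in and leasePeriod value are hypothetical):

    import java.util.TreeSet;

    // Expiry sweep sketch: oldest lastUpdate sorts first, so only the head
    // of the set ever needs to be inspected.
    public class LeaseSweepExample {
      static class Lease implements Comparable<Lease> {
        final String id;
        final long lastUpdate;

        Lease(String id, long lastUpdate) {
          this.id = id;
          this.lastUpdate = lastUpdate;
        }

        public int compareTo(Lease other) {
          if (this.lastUpdate < other.lastUpdate) { return -1; }
          if (this.lastUpdate > other.lastUpdate) { return 1; }
          return this.id.compareTo(other.id);  // tie-break so equal times coexist
        }
      }

      public static void main(String[] args) {
        long leasePeriod = 1000L;  // hypothetical expiry window, in ms
        long now = System.currentTimeMillis();
        TreeSet<Lease> sortedLeases = new TreeSet<Lease>();
        sortedLeases.add(new Lease("a", now - 5000L));  // stale
        sortedLeases.add(new Lease("b", now));          // fresh

        while (!sortedLeases.isEmpty()
               && now - sortedLeases.first().lastUpdate > leasePeriod) {
          System.out.println("expiring lease " + sortedLeases.first().id);
          sortedLeases.remove(sortedLeases.first());
        }
      }
    }
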
+ 10 - 10
src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java

@@ -29,27 +29,27 @@ public class Environment {
    String value = null;
    
    value = System.getenv("DEBUGGING");
-    if(value != null && value.equalsIgnoreCase("TRUE")) {
+    if (value != null && value.equalsIgnoreCase("TRUE")) {
      debugging = true;
    }
    
    value = System.getenv("LOGGING_LEVEL");
-    if(value != null && value.length() != 0) {
-      if(value.equalsIgnoreCase("ALL")) {
+    if (value != null && value.length() != 0) {
+      if (value.equalsIgnoreCase("ALL")) {
        logLevel = Level.ALL;
-      } else if(value.equalsIgnoreCase("DEBUG")) {
+      } else if (value.equalsIgnoreCase("DEBUG")) {
        logLevel = Level.DEBUG;
-      } else if(value.equalsIgnoreCase("ERROR")) {
+      } else if (value.equalsIgnoreCase("ERROR")) {
        logLevel = Level.ERROR;
-      } else if(value.equalsIgnoreCase("FATAL")) {
+      } else if (value.equalsIgnoreCase("FATAL")) {
        logLevel = Level.FATAL;
-      } else if(value.equalsIgnoreCase("INFO")) {
+      } else if (value.equalsIgnoreCase("INFO")) {
        logLevel = Level.INFO;
-      } else if(value.equalsIgnoreCase("OFF")) {
+      } else if (value.equalsIgnoreCase("OFF")) {
        logLevel = Level.OFF;
-      } else if(value.equalsIgnoreCase("TRACE")) {
+      } else if (value.equalsIgnoreCase("TRACE")) {
        logLevel = Level.TRACE;
-      } else if(value.equalsIgnoreCase("WARN")) {
+      } else if (value.equalsIgnoreCase("WARN")) {
        logLevel = Level.WARN;
      }
    }

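Environment.getenv maps the LOGGING_LEVEL string through a hand-written else-if chain. log4j 1.x also ships Level.toLevel(String, Level), which performs roughly the same case-insensitive lookup with an explicit default; a sketch of the shorter form (assuming that API, which this commit does not use):

    import org.apache.log4j.Level;

    // Shorter form of the mapping above, assuming log4j 1.x's Level.toLevel.
    public class LogLevelExample {
      public static void main(String[] args) {
        String value = System.getenv("LOGGING_LEVEL");
        if (value != null && value.length() != 0) {
          Level logLevel = Level.toLevel(value, Level.WARN);  // WARN if unrecognized
          System.out.println("using level " + logLevel);
        }
      }
    }
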
+ 27 - 27
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java

@@ -90,7 +90,7 @@ public class TestHRegion extends TestCase {
  
  public void testSetup() throws IOException {
    try {
-      if(System.getProperty("test.build.data") == null) {
+      if (System.getProperty("test.build.data") == null) {
        String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
        System.out.println(dir);
        System.setProperty("test.build.data", dir);
@@ -98,7 +98,7 @@ public class TestHRegion extends TestCase {
      conf = new Configuration();
      
      Environment.getenv();
-      if(Environment.debugging) {
+      if (Environment.debugging) {
        Logger rootLogger = Logger.getRootLogger();
        rootLogger.setLevel(Level.WARN);
        
@@ -133,7 +133,7 @@ public class TestHRegion extends TestCase {
  // Test basic functionality. Writes to contents:basic and anchor:anchornum-*

  public void testBasic() throws IOException {
-    if(!initialized) {
+    if (!initialized) {
      throw new IllegalStateException();
    }

@@ -191,7 +191,7 @@ public class TestHRegion extends TestCase {
  // Test scanners. Writes contents:firstcol and anchor:secondcol
  
  public void testScan() throws IOException {
-    if(!initialized) {
+    if (!initialized) {
      throw new IllegalStateException();
    }

@@ -225,13 +225,13 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          int curval = Integer.parseInt(new String(val).trim());

          for(int j = 0; j < cols.length; j++) {
-            if(col.compareTo(cols[j]) == 0) {
+            if (col.compareTo(cols[j]) == 0) {
              assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                           + ", Value for " + col + " should be: " + k
                           + ", but was fetched as: " + curval, k, curval);
@@ -258,13 +258,13 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          int curval = Integer.parseInt(new String(val).trim());

          for(int j = 0; j < cols.length; j++) {
-            if(col.compareTo(cols[j]) == 0) {
+            if (col.compareTo(cols[j]) == 0) {
              assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                           + ", Value for " + col + " should be: " + k
                           + ", but was fetched as: " + curval, k, curval);
@@ -299,13 +299,13 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          int curval = Integer.parseInt(new String(val).trim());

          for(int j = 0; j < cols.length; j++) {
-            if(col.compareTo(cols[j]) == 0) {
+            if (col.compareTo(cols[j]) == 0) {
              assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                           + ", Value for " + col + " should be: " + k
                           + ", but was fetched as: " + curval, k, curval);
@@ -332,7 +332,7 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          int curval = Integer.parseInt(new String(val).trim());
@@ -362,7 +362,7 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 500;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          int curval = Integer.parseInt(new String(val).trim());
@@ -390,10 +390,10 @@ public class TestHRegion extends TestCase {
  // Creates contents:body
  
  public void testBatchWrite() throws IOException {
-    if(!initialized || failures) {
+    if (!initialized || failures) {
      throw new IllegalStateException();
    }
-    if(! Environment.debugging) {
+    if (!Environment.debugging) {
      return;
    }

@@ -437,7 +437,7 @@ public class TestHRegion extends TestCase {
        }
      }
      long startCompact = System.currentTimeMillis();
-      if(region.compactStores()) {
+      if (region.compactStores()) {
        totalCompact = System.currentTimeMillis() - startCompact;
        System.out.println("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
        
@@ -467,14 +467,14 @@ public class TestHRegion extends TestCase {
  // NOTE: This test depends on testBatchWrite succeeding
  
  public void testSplitAndMerge() throws IOException {
-    if(!initialized || failures) {
+    if (!initialized || failures) {
      throw new IllegalStateException();
    }
    
    try {
      Text midKey = new Text();
      
-      if(region.needsSplit(midKey)) {
+      if (region.needsSplit(midKey)) {
        System.out.println("Needs split");
      }
      
@@ -504,7 +504,7 @@ public class TestHRegion extends TestCase {
  // This test verifies that everything is still there after splitting and merging
  
  public void testRead() throws IOException {
-    if(!initialized || failures) {
+    if (!initialized || failures) {
      throw new IllegalStateException();
    }

@@ -525,19 +525,19 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          String curval = new String(val).trim();

-          if(col.compareTo(CONTENTS_BASIC) == 0) {
+          if (col.compareTo(CONTENTS_BASIC) == 0) {
            assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                       + ", Value for " + col + " should start with: " + CONTENTSTR
                       + ", but was fetched as: " + curval,
                       curval.startsWith(CONTENTSTR));
            contentsFetched++;
            
-          } else if(col.toString().startsWith(ANCHORNUM)) {
+          } else if (col.toString().startsWith(ANCHORNUM)) {
            assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                       + ", Value for " + col + " should start with: " + ANCHORSTR
                       + ", but was fetched as: " + curval,
@@ -572,7 +572,7 @@ public class TestHRegion extends TestCase {
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      int k = 0;
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          Text col = it.next();
          byte val[] = curVals.get(col);
          int curval = Integer.parseInt(new String(val).trim());
@@ -596,7 +596,7 @@ public class TestHRegion extends TestCase {
    
    // Verify testBatchWrite data

-    if(Environment.debugging) {
+    if (Environment.debugging) {
      s = region.getScanner(new Text[] { CONTENTS_BODY }, new Text());
      try {
        int numFetched = 0;
@@ -604,7 +604,7 @@ public class TestHRegion extends TestCase {
        TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
        int k = 0;
        while(s.next(curKey, curVals)) {
-          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
            Text col = it.next();
            byte val[] = curVals.get(col);

@@ -635,7 +635,7 @@ public class TestHRegion extends TestCase {
      HStoreKey curKey = new HStoreKey();
      TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
      while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
          it.next();
          fetched++;
        }
@@ -650,7 +650,7 @@

  
  private static void deleteFile(File f) {
-    if(f.isDirectory()) {
+    if (f.isDirectory()) {
      File[] children = f.listFiles();
      for(int i = 0; i < children.length; i++) {
        deleteFile(children[i]);
@@ -660,7 +660,7 @@
  }
  
  public void testCleanup() throws IOException {
-    if(!initialized) {
+    if (!initialized) {
      throw new IllegalStateException();
    }


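The scanner hunks above all tighten the same loop shape: advance the scanner,
then walk the returned column map with an explicit Iterator. A standalone
hedged sketch of that shape over a plain TreeMap (the commit only changes
spacing, not this logic):

    import java.util.Iterator;
    import java.util.TreeMap;

    TreeMap<String, byte[]> curVals = new TreeMap<String, byte[]>();
    curVals.put("contents:basic", "0".getBytes());
    for (Iterator<String> it = curVals.keySet().iterator(); it.hasNext();) {
      String col = it.next();
      byte[] val = curVals.get(col);
      int curval = Integer.parseInt(new String(val).trim());
      // ... assertions against col/curval go here, as in testScan() ...
    }
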
+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java

@@ -219,7 +219,7 @@ class CompoundDirSpec {
  public static String expandGlobInputSpec(String inputSpec, JobConf job)
  {
    inputSpec = inputSpec.trim();
-    if(!inputSpec.startsWith(MERGEGLOB_PREFIX)) {
+    if (!inputSpec.startsWith(MERGEGLOB_PREFIX)) {
      return inputSpec;
    }
    inputSpec = inputSpec.substring(MERGEGLOB_PREFIX.length());

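The guard above (pass the spec through unless it carries a known prefix, then
strip the prefix before expansion) is easy to isolate. A minimal hedged
sketch, with a hypothetical method name:

    // Returns spec unchanged unless it starts with the marker prefix,
    // in which case the prefix is stripped before further processing.
    static String stripMarker(String spec, String prefix) {
      if (!spec.startsWith(prefix)) {
        return spec;
      }
      return spec.substring(prefix.length());
    }
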
+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java

@@ -115,7 +115,7 @@ public class JarBuilder {
    JarEntry entry = null;
    while (entries.hasMoreElements()) {
      entry = (JarEntry) entries.nextElement();
-      //if(entry.getName().startsWith("META-INF/")) continue; 
+      //if (entry.getName().startsWith("META-INF/")) continue; 
      InputStream in = src.getInputStream(entry);
      addNamedStream(dst, entry.getName(), in);
    }

+ 3 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java

@@ -82,7 +82,7 @@ public class MergerInputFormat extends InputFormatBase {
  /** Delegate to the primary InputFormat. 
      Force full-file splits since there's no index to sync secondaries.
      (and if there was, this index may need to be created for the first time
-      full file at a time...    )
+      full file at a time...   )
  */
  public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
    return ((StreamInputFormat) primary_).getSplits(job, numSplits);
@@ -121,7 +121,7 @@ public class MergerInputFormat extends InputFormatBase {
  /*
    private FileSplit relatedSplit(FileSplit primarySplit, int i, CompoundDirSpec spec) throws IOException
    {
-    if(i == 0) {
+    if (i == 0) {
    return primarySplit;
    }

@@ -330,7 +330,7 @@ class MergeRecordStream {
  Writable v_;

  public MergeRecordStream(int index, RecordReader reader, WritableComparable k, Writable v)
-      throws IOException {
+    throws IOException {
    index_ = index;
    reader_ = reader;
    k_ = k;

+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java

@@ -261,7 +261,7 @@ public abstract class PipeMapRed {
          finalOutputURI = new URI(sideEffectPathFinal_.toString()); // implicit dfs: 
        }
        // apply default scheme
-        if(finalOutputURI.getScheme() == null) {
+        if (finalOutputURI.getScheme() == null) {
          finalOutputURI = new URI("file", finalOutputURI.getSchemeSpecificPart(), null);
        }
        boolean allowSocket = useSingleSideOutputURI_;
@@ -579,7 +579,7 @@ public abstract class PipeMapRed {
          logprintln("closing " + finalOutputURI);
          if (sideEffectOut_ != null) sideEffectOut_.close();
          logprintln("closed  " + finalOutputURI);
-          if ( ! useSingleSideOutputURI_) {
+          if (!useSingleSideOutputURI_) {
            ((PhasedFileSystem)sideFs_).commit(); 
          }
        }

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java

@@ -70,7 +70,7 @@ public class PipeMapper extends PipeMapRed implements Mapper {
    if (outThread_ == null) {
      startOutputThreads(output, reporter);
    }
-    if( outerrThreadsThrowable != null ) {
+    if (outerrThreadsThrowable != null) {
      mapRedFinished();
      throw new IOException ("MROutput/MRErrThread failed:"
                             + StringUtils.stringifyException(

+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java

@@ -74,10 +74,10 @@ public class PipeReducer extends PipeMapRed implements Reducer {
        numRecRead_++;
        maybeLogRecord();
        if (doPipe_) {
-          if( outerrThreadsThrowable != null ) {
+          if (outerrThreadsThrowable != null) {
            mapRedFinished();
            throw new IOException ("MROutput/MRErrThread failed:"
-                                   + StringUtils.stringifyException( 
+                                   + StringUtils.stringifyException(
                                                                    outerrThreadsThrowable));
          }
          write(key);

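The PipeMapper and PipeReducer hunks touch the same idiom: the stdout/stderr
drain threads record any failure in a shared field, and the main loop
re-throws it before processing more records. A generic hedged sketch of that
idiom (the class and field names are illustrative, not PipeMapRed internals):

    import java.io.IOException;

    class PipedWorker {
      // Written by the worker thread, read by the main loop.
      private volatile Throwable workerFailure;

      // Called from the main loop before each record is written.
      void checkWorker() throws IOException {
        if (workerFailure != null) {
          throw new IOException("worker thread failed: " + workerFailure);
        }
      }
    }
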
+ 39 - 39
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java

@@ -82,7 +82,7 @@ public class StreamJob {
    new DefaultOptionBuilder("-","-", false);
  private ArgumentBuilder argBuilder = new ArgumentBuilder(); 
  private Parser parser = new Parser(); 
-  private Group allOptions ; 
+  private Group allOptions; 
  HelpFormatter helpFormatter = new HelpFormatter("  ", "  ", "  ", 900);
  // need these two at class level to extract values later from 
  // commons-cli command line
@@ -197,7 +197,7 @@ public class StreamJob {
  }

  void parseArgv(){
-    CommandLine cmdLine = null ; 
+    CommandLine cmdLine = null; 
    try{
      cmdLine = parser.parse(argv_);
    }catch(Exception oe){
@@ -209,10 +209,10 @@ public class StreamJob {
      }
    }
    
-    if( cmdLine != null ){
-      verbose_ =  cmdLine.hasOption("-verbose") ;
-      detailedUsage_ = cmdLine.hasOption("-info") ;
-      debug_ = cmdLine.hasOption("-debug")? debug_ + 1 : debug_ ;
+    if (cmdLine != null){
+      verbose_ =  cmdLine.hasOption("-verbose");
+      detailedUsage_ = cmdLine.hasOption("-info");
+      debug_ = cmdLine.hasOption("-debug")? debug_ + 1 : debug_;
      inputTagged_ = cmdLine.hasOption("-inputtagged"); 
      
      inputSpecs_.addAll(cmdLine.getValues("-input"));
@@ -230,12 +230,12 @@ public class StreamJob {
      configPath_.addAll(cmdLine.getValues("-config"));
      
      String fsName = (String)cmdLine.getValue("-dfs");
-      if( null != fsName ){
+      if (null != fsName){
        userJobConfProps_.put("fs.default.name", fsName);        
      }
      
      String jt = (String)cmdLine.getValue("mapred.job.tracker");
-      if( null != jt ){
+      if (null != jt){
        userJobConfProps_.put("fs.default.name", jt);        
      }
      
@@ -246,15 +246,15 @@ public class StreamJob {
      inReaderSpec_ = (String)cmdLine.getValue("-inputreader"); 
      
      List<String> car = cmdLine.getValues("-cacheArchive"); 
-      if( null != car ){
-        for( String s : car ){
+      if (null != car){
+        for(String s : car){
          cacheArchives = (cacheArchives == null)?s :cacheArchives + "," + s;  
        }
      }

      List<String> caf = cmdLine.getValues("-cacheFile"); 
-      if( null != caf ){
-        for( String s : caf ){
+      if (null != caf){
+        for(String s : caf){
          cacheFiles = (cacheFiles == null)?s :cacheFiles + "," + s;  
        }
      }
@@ -262,14 +262,14 @@ public class StreamJob {
      List<String> jobConfArgs = (List<String>)cmdLine.getValue(jobconf); 
      List<String> envArgs = (List<String>)cmdLine.getValue(cmdenv); 
      
-      if( null != jobConfArgs ){
-        for( String s : jobConfArgs){
+      if (null != jobConfArgs){
+        for(String s : jobConfArgs){
          String []parts = s.split("="); 
          userJobConfProps_.put(parts[0], parts[1]);
        }
      }
-      if( null != envArgs ){
-        for( String s : envArgs ){
+      if (null != envArgs){
+        for(String s : envArgs){
          if (addTaskEnvironment_.length() > 0) {
            addTaskEnvironment_ += " ";
          }
@@ -310,7 +310,7 @@ public class StreamJob {
      withMinimum(1).
      withMaximum(max).
      withValidator(validator).
-      create() ;
+      create();
   
    return builder.
      withLongName(name).
@@ -332,15 +332,15 @@ public class StreamJob {
          // an can exec check in java 6
          for (String file : (List<String>)values) {
            File f = new File(file);  
-            if ( ! f.exists() ) {
+            if (!f.exists()) {
              throw new InvalidArgumentException("Argument : " + 
                                                 f.getAbsolutePath() + " doesn't exist."); 
            }
-            if ( ! f.isFile() ) {
+            if (!f.isFile()) {
              throw new InvalidArgumentException("Argument : " + 
                                                 f.getAbsolutePath() + " is not a file."); 
            }
-            if ( ! f.canRead() ) {
+            if (!f.canRead()) {
              throw new InvalidArgumentException("Argument : " + 
                                                 f.getAbsolutePath() + " is not accessible"); 
            }
@@ -378,7 +378,7 @@ public class StreamJob {
    Option mapper  = createOption("mapper", 
                                  "The streaming command to run", "cmd", 1, false);
    Option combiner = createOption("combiner", 
-                                   "The streaming command to run", "cmd",1, false);
+                                   "The streaming command to run", "cmd", 1, false);
    // reducer could be NONE 
    Option reducer = createOption("reducer", 
                                  "The streaming command to run", "cmd", 1, false); 
@@ -388,21 +388,21 @@ public class StreamJob {
    Option dfs = createOption("dfs", 
                              "Optional. Override DFS configuration", "<h:p>|local", 1, false); 
    Option jt = createOption("jt", 
-                             "Optional. Override JobTracker configuration", "<h:p>|local",1, false);
+                             "Optional. Override JobTracker configuration", "<h:p>|local", 1, false);
    Option additionalconfspec = createOption("additionalconfspec", 
-                                             "Optional.", "spec",1, false );
+                                             "Optional.", "spec", 1, false);
    Option inputformat = createOption("inputformat", 
-                                      "Optional.", "spec",1, false );
+                                      "Optional.", "spec", 1, false);
    Option outputformat = createOption("outputformat", 
-                                       "Optional.", "spec",1, false );
+                                       "Optional.", "spec", 1, false);
    Option partitioner = createOption("partitioner", 
-                                      "Optional.", "spec",1, false );
+                                      "Optional.", "spec", 1, false);
    Option inputreader = createOption("inputreader", 
-                                      "Optional.", "spec",1, false );
+                                      "Optional.", "spec", 1, false);
    Option cacheFile = createOption("cacheFile", 
                                    "File name URI", "fileNameURI", 1, false);
    Option cacheArchive = createOption("cacheArchive", 
-                                       "File name URI", "fileNameURI",1, false);
+                                       "File name URI", "fileNameURI", 1, false);
    
    // boolean properties
    
@@ -844,7 +844,7 @@ public class StreamJob {
    if (cacheFiles != null)
      DistributedCache.setCacheFiles(fileURIs, jobConf_);
    
-    if(verbose_) {
+    if (verbose_) {
      listJobConfProperties();
    }
   
@@ -956,7 +956,7 @@ public class StreamJob {
      LOG.info("To kill this job, run:");
      LOG.info(getHadoopClientHome() + "/bin/hadoop job  -Dmapred.job.tracker=" + hp + " -kill "
               + jobId_);
-      //LOG.info("Job file: " + running_.getJobFile() );
+      //LOG.info("Job file: " + running_.getJobFile());
      LOG.info("Tracking URL: " + StreamUtil.qualifyHost(running_.getTrackingURL()));
    }
  }
@@ -1012,7 +1012,7 @@ public class StreamJob {
    }catch(FileAlreadyExistsException fae){
      LOG.error("Error launching job , Output path already exists : " 
                + fae.getMessage());
-    }catch( IOException ioe){
+    }catch(IOException ioe){
      LOG.error("Error Launching job : " + ioe.getMessage());
    }
    finally {
@@ -1025,7 +1025,7 @@ public class StreamJob {
  }
  /** Support -jobconf x=y x1=y1 type options **/
  class MultiPropertyOption extends PropertyOption{
-    private String optionString ; 
+    private String optionString; 
    MultiPropertyOption(){
      super(); 
    }
@@ -1033,7 +1033,7 @@ public class StreamJob {
    MultiPropertyOption(final String optionString,
                        final String description,
                        final int id){
-      super(optionString, description, id) ; 
+      super(optionString, description, id); 
      this.optionString = optionString;
    }

@@ -1053,10 +1053,10 @@ public class StreamJob {
      }
      
      ArrayList properties = new ArrayList(); 
-      String next = "" ; 
-      while( arguments.hasNext()){
+      String next = ""; 
+      while(arguments.hasNext()){
        next = (String) arguments.next();
-        if( ! next.startsWith("-") ){
+        if (!next.startsWith("-")){
          properties.add(next);
        }else{
          arguments.previous();
@@ -1064,9 +1064,9 @@ public class StreamJob {
        }
      } 

-      // add to any existing values ( support specifying args multiple times)
-      List<String> oldVal = (List<String>)commandLine.getValue(this) ; 
-      if( oldVal == null ){
+      // add to any existing values (support specifying args multiple times)
+      List<String> oldVal = (List<String>)commandLine.getValue(this); 
+      if (oldVal == null){
        commandLine.addValue(this, properties);
      }else{
        oldVal.addAll(properties); 

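The -jobconf and -cmdenv handling above boils down to splitting repeated
"key=value" arguments into a property map. A standalone hedged sketch of just
that step, in plain Java and independent of the commons-cli plumbing:

    import java.util.Properties;

    class KeyValueArgs {
      // Parses arguments of the form key=value, as accepted by -jobconf.
      static Properties parse(String[] pairs) {
        Properties props = new Properties();
        for (String s : pairs) {
          String[] parts = s.split("=", 2);  // split on the first '=' only
          props.setProperty(parts[0], parts.length > 1 ? parts[1] : "");
        }
        return props;
      }
    }

The split limit of 2 is a deliberate choice in this sketch: a plain split("=")
as in the hunk above would drop anything after a second '=', so values that
themselves contain '=' are an assumed edge case here.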
+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java

@@ -494,10 +494,10 @@ public class StreamUtil {
  public static String getBoundAntProperty(String name, String defaultVal)
  {
    String val = System.getProperty(name);
-    if(val != null && val.indexOf("${") >= 0) {
+    if (val != null && val.indexOf("${") >= 0) {
      val = null;
    }
-    if(val == null) {
+    if (val == null) {
      val = defaultVal;
    }
    return val;

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java

@@ -101,7 +101,7 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
    ((Text) key).set(record);
    ((Text) value).set("");

-    /*if(numNext < 5) {
+    /*if (numNext < 5) {
      System.out.println("@@@ " + numNext + ". true next k=|" + key.toString().replaceAll("[\\r\\n]", " ")
      + "|, len=" + buf.length() + " v=|" + value.toString().replaceAll("[\\r\\n]", " ") + "|");
      }*/

+ 6 - 6
src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java

@@ -39,7 +39,7 @@ public class UTF8ByteArrayUtils {
   */
  public static int findTab(byte [] utf, int start, int length) {
    for(int i=start; i<(start+length); i++) {
-      if(utf[i]==(byte)'\t') {
+      if (utf[i]==(byte)'\t') {
        return i;
      }
    }
@@ -68,9 +68,9 @@ public class UTF8ByteArrayUtils {
   */
  public static void splitKeyVal(byte[] utf, int start, int length, 
                                 Text key, Text val, int splitPos) throws IOException {
-    if(splitPos<start || splitPos >= (start+length))
-      throw new IllegalArgumentException( "splitPos must be in the range " +
-                                          "[" + start + ", " + (start+length) + "]: " + splitPos);
+    if (splitPos<start || splitPos >= (start+length))
+      throw new IllegalArgumentException("splitPos must be in the range " +
+                                         "[" + start + ", " + (start+length) + "]: " + splitPos);
    int keyLen = (splitPos-start);
    byte [] keyBytes = new byte[keyLen];
    System.arraycopy(utf, start, keyBytes, 0, keyLen);
@@ -122,7 +122,7 @@ public class UTF8ByteArrayUtils {
      if (c == '\r') {
        in.mark(1);
        int c2 = in.read();
-        if(c2 == -1) {
+        if (c2 == -1) {
          isEOF = true;
          break;
        }
@@ -142,7 +142,7 @@ public class UTF8ByteArrayUtils {
      buf[offset++] = (byte) c;
    }

-    if(isEOF && offset==0) {
+    if (isEOF && offset==0) {
      return null;
    } else {
      lineBuffer = new byte[offset];

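For context, the two helpers touched above are typically used together to
split a streaming record at its first tab. A hedged usage sketch, using only
the signatures visible in the hunks (it assumes findTab returns a negative
value when no tab is present):

    import java.io.IOException;
    import org.apache.hadoop.io.Text;

    static void demo() throws IOException {
      byte[] line = "key\tvalue".getBytes();
      Text key = new Text();
      Text val = new Text();
      int tab = UTF8ByteArrayUtils.findTab(line, 0, line.length);
      if (tab >= 0) {
        // Copies bytes before the tab into key, bytes after it into val.
        UTF8ByteArrayUtils.splitKeyVal(line, 0, line.length, key, val, tab);
      }
    }
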
+ 4 - 4
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java

@@ -217,7 +217,7 @@ public class TestStreamedMerge extends TestCase {
    String overrideFS = StreamUtil.getBoundAntProperty("fs.default.name", null);
    MiniDFSCluster cluster = null;
    try {
-      if(overrideFS == null) {
+      if (overrideFS == null) {
        cluster = new MiniDFSCluster(conf_, 1, true, null);
        fs_ = cluster.getFileSystem();
      } else {
@@ -265,7 +265,7 @@ public class TestStreamedMerge extends TestCase {
    } else {
      String userOut = StreamUtil.getBoundAntProperty(
                                                      "hadoop.test.localoutputfile", null);
-      if(userOut != null) {
+      if (userOut != null) {
        f = new File(userOut);
        // don't delete so they can mkfifo
        maybeFifoOutput_ = true;
@@ -275,7 +275,7 @@ public class TestStreamedMerge extends TestCase {
        maybeFifoOutput_ = false;
      }
      String s = new Path(f.getAbsolutePath()).toString();
-      if(! s.startsWith("/")) {
+      if (!s.startsWith("/")) {
        s = "/" + s; // Windows "file:/C:/"
      }
      sideOutput = "file:" + s;
@@ -292,7 +292,7 @@ public class TestStreamedMerge extends TestCase {
      }
      output = outputBuf.toString();
    } else {
-      if(maybeFifoOutput_) {
+      if (maybeFifoOutput_) {
        System.out.println("assertEquals will fail.");
        output = "potential FIFO: not retrieving to avoid blocking on open() "
          + f.getAbsoluteFile();

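The first hunk shows the usual conditional cluster bootstrap for these tests.
A hedged sketch of that setup path, reusing only calls visible above (the
MiniDFSCluster constructor arguments are taken verbatim from the hunk):

    Configuration conf = new Configuration();
    String overrideFS = StreamUtil.getBoundAntProperty("fs.default.name", null);
    MiniDFSCluster cluster = null;
    FileSystem fs;
    if (overrideFS == null) {
      // No external fs.default.name supplied: run an in-process mini DFS.
      cluster = new MiniDFSCluster(conf, 1, true, null);
      fs = cluster.getFileSystem();
    } else {
      // Point at the externally provided filesystem instead.
      conf.set("fs.default.name", overrideFS);
      fs = FileSystem.get(conf);
    }
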
+ 3 - 3
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java

@@ -68,7 +68,7 @@ public class TrApp
  void expect(String evName, String evVal) throws IOException
  {
    String got = env.getProperty(evName);
-    if(! evVal.equals(got)) {
+    if (!evVal.equals(got)) {
      String msg = "FAIL evName=" + evName + " got=" + got + " expect=" + evVal;
      throw new IOException(msg);
    }
@@ -77,7 +77,7 @@ public class TrApp
  void expectDefined(String evName) throws IOException
  {
    String got = env.getProperty(evName);
-    if(got == null) {
+    if (got == null) {
      String msg = "FAIL evName=" + evName + " is undefined. Expect defined.";
      throw new IOException(msg);
    }
@@ -105,7 +105,7 @@ public class TrApp

  public static String CUnescape(String s)
  {
-    if(s.equals("\\n")) {
+    if (s.equals("\\n")) {
      return "\n";
    } else {
      return s;

+ 1 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java

@@ -39,7 +39,7 @@ public class UniqApp
    String line;
    String prevLine = null;
    while ((line = in.readLine()) != null) {
-      if(! line.equals(prevLine)) {
+      if (!line.equals(prevLine)) {
        System.out.println(header + line);
      }
      prevLine = line;

+ 2 - 2
src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java

@@ -34,7 +34,7 @@ class UtilTest {

  void checkUserDir() {
    // trunk/src/contrib/streaming --> trunk/build/contrib/streaming/test/data
-    if(! userDir_.equals(antTestDir_)) {
+    if (!userDir_.equals(antTestDir_)) {
      // because changes to user.dir are ignored by File static methods.
      throw new IllegalStateException("user.dir != test.build.data. The junit Ant task must be forked.");
    }
@@ -43,7 +43,7 @@ class UtilTest {
  void redirectIfAntJunit() throws IOException
  {
    boolean fromAntJunit = System.getProperty("test.build.data") != null;
-    if(fromAntJunit) {
+    if (fromAntJunit) {
      new File(antTestDir_).mkdirs();
      File outFile = new File(antTestDir_, testName_+".log");
      PrintStream out = new PrintStream(new FileOutputStream(outFile));

+ 13 - 13
src/java/org/apache/hadoop/conf/Configuration.java

@@ -107,7 +107,7 @@ public class Configuration {
    this.finalResources = (ArrayList)other.finalResources.clone();
    if (other.properties != null)
      this.properties = (Properties)other.properties.clone();
-    if(other.overlay!=null)
+    if (other.overlay!=null)
      this.overlay = (Properties)other.overlay.clone();
  }

@@ -142,7 +142,7 @@ public class Configuration {
  }

  private synchronized void addResource(ArrayList<Object> resources,
-      Object resource) {
+                                        Object resource) {
    
    resources.add(resource);                      // add to resources
    properties = null;                            // trigger reload
@@ -172,23 +172,23 @@ public class Configuration {
  private static int MAX_SUBST = 20;

  private String substituteVars(String expr) {
-    if(expr == null) {
+    if (expr == null) {
      return null;
    }
    Matcher match = varPat.matcher("");
    String eval = expr;
    for(int s=0; s<MAX_SUBST; s++) {
      match.reset(eval);
-      if(! match.find()) {
+      if (!match.find()) {
        return eval;
      }
      String var = match.group();
      var = var.substring(2, var.length()-1); // remove ${ .. }
      String val = System.getProperty(var);
-      if(val == null) {
+      if (val == null) {
        val = (String)this.getObject(var);
      }
-      if(val == null) {
+      if (val == null) {
        return eval; // return literal ${var}: var is unbound
      }
      // substitute
@@ -211,7 +211,7 @@ public class Configuration {
  }
  
  private synchronized Properties getOverlay() {
-    if(overlay==null){
+    if (overlay==null){
      overlay=new Properties();
    }
    return overlay;
@@ -221,7 +221,7 @@ public class Configuration {
   * exists, then <code>defaultValue</code> is returned.
   */
  public String get(String name, String defaultValue) {
-     return substituteVars(getProps().getProperty(name, defaultValue));
+    return substituteVars(getProps().getProperty(name, defaultValue));
  }
    
  /** Returns the value of the <code>name</code> property as an integer.  If no
@@ -338,7 +338,7 @@ public class Configuration {
   * interface. 
   */
  public Class<?> getClass(String propertyName, Class<?> defaultValue,
-      Class<?> xface) {
+                           Class<?> xface) {
    
    try {
      Class<?> theClass = getClass(propertyName, defaultValue);
@@ -354,7 +354,7 @@ public class Configuration {
   * First checks that the class implements the named interface. 
   */
  public void setClass(String propertyName, Class<?> theClass,
-      Class<?> xface) {
+                       Class<?> xface) {
    
    if (!xface.isAssignableFrom(theClass))
      throw new RuntimeException(theClass+" not "+xface.getName());
@@ -380,7 +380,7 @@ public class Configuration {
      }
    }
    LOG.warn("Could not make " + path + 
-                " in local directories from " + dirsProp);
+             " in local directories from " + dirsProp);
    for(int i=0; i < dirs.length; i++) {
      int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length;
      LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]);
@@ -460,7 +460,7 @@ public class Configuration {
      loadResources(newProps, defaultResources, false, quietmode);
      loadResources(newProps, finalResources, true, true);
      properties = newProps;
-      if(overlay!=null)
+      if (overlay!=null)
        properties.putAll(overlay);
    }
    return properties;
@@ -575,7 +575,7 @@ public class Configuration {
        String name = (String)e.nextElement();
        Object object = properties.get(name);
        String value = null;
-        if(object instanceof String) {
+        if (object instanceof String) {
          value = (String) object;
        }else {
          continue;

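The substituteVars() hunks above implement ${var} expansion: each pass tries
System properties first, then other configuration keys, with MAX_SUBST
bounding the number of rewrites. A hedged usage sketch:

    import org.apache.hadoop.conf.Configuration;

    Configuration conf = new Configuration();
    conf.set("hadoop.tmp.dir", "/tmp/hadoop-${user.name}");
    // get() runs the stored value through substituteVars(), so here
    // ${user.name} resolves from System.getProperty("user.name").
    String tmp = conf.get("hadoop.tmp.dir");
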
+ 3 - 3
src/java/org/apache/hadoop/dfs/Block.java

@@ -112,7 +112,7 @@ class Block implements Writable, Comparable {
  public void readFields(DataInput in) throws IOException {
    this.blkid = in.readLong();
    this.len = in.readLong();
-    if( len < 0 ) {
+    if (len < 0) {
      throw new IOException("Unexpected block size: " + len);
    }
  }
@@ -122,9 +122,9 @@ class Block implements Writable, Comparable {
  /////////////////////////////////////
  public int compareTo(Object o) {
    Block b = (Block) o;
-    if ( blkid < b.blkid ) {
+    if (blkid < b.blkid) {
      return -1;
-    } else if ( blkid == b.blkid ) {
+    } else if (blkid == b.blkid) {
      return 0;
    } else {
      return 1;

+ 70 - 70
src/java/org/apache/hadoop/dfs/BlockCommand.java

@@ -24,10 +24,10 @@ class DatanodeCommand implements Writable {
   DatanodeProtocol.DataNodeAction action;
   DatanodeProtocol.DataNodeAction action;
   
   
   public DatanodeCommand() {
   public DatanodeCommand() {
-    this( DatanodeProtocol.DataNodeAction.DNA_UNKNOWN );
+    this(DatanodeProtocol.DataNodeAction.DNA_UNKNOWN);
   }
   }
   
   
-  public DatanodeCommand( DatanodeProtocol.DataNodeAction action ) {
+  public DatanodeCommand(DatanodeProtocol.DataNodeAction action) {
     this.action = action;
     this.action = action;
   }
   }
 
 
@@ -43,12 +43,12 @@ class DatanodeCommand implements Writable {
   }
   }
 
 
   public void write(DataOutput out) throws IOException {
   public void write(DataOutput out) throws IOException {
-    WritableUtils.writeEnum( out, action );
+    WritableUtils.writeEnum(out, action);
   }
   }
   
   
   public void readFields(DataInput in) throws IOException {
   public void readFields(DataInput in) throws IOException {
     this.action = (DatanodeProtocol.DataNodeAction)
     this.action = (DatanodeProtocol.DataNodeAction)
-      WritableUtils.readEnum( in, DatanodeProtocol.DataNodeAction.class );
+      WritableUtils.readEnum(in, DatanodeProtocol.DataNodeAction.class);
   }
   }
 }
 }
 
 
@@ -62,81 +62,81 @@ class DatanodeCommand implements Writable {
  * @author Mike Cafarella
  * @author Mike Cafarella
  ****************************************************/
  ****************************************************/
 class BlockCommand extends DatanodeCommand {
 class BlockCommand extends DatanodeCommand {
-    Block blocks[];
-    DatanodeInfo targets[][];
+  Block blocks[];
+  DatanodeInfo targets[][];
 
 
-    public BlockCommand() {}
+  public BlockCommand() {}
 
 
-    /**
-     * Create BlockCommand for transferring blocks to another datanode
-     * @param blocks    blocks to be transferred 
-     * @param targets   nodes to transfer
-     */
-    public BlockCommand(Block blocks[], DatanodeInfo targets[][]) {
-      super(  DatanodeProtocol.DataNodeAction.DNA_TRANSFER );
-      this.blocks = blocks;
-      this.targets = targets;
-    }
+  /**
+   * Create BlockCommand for transferring blocks to another datanode
+   * @param blocks    blocks to be transferred 
+   * @param targets   nodes to transfer
+   */
+  public BlockCommand(Block blocks[], DatanodeInfo targets[][]) {
+    super( DatanodeProtocol.DataNodeAction.DNA_TRANSFER);
+    this.blocks = blocks;
+    this.targets = targets;
+  }
 
 
-    /**
-     * Create BlockCommand for block invalidation
-     * @param blocks  blocks to invalidate
-     */
-    public BlockCommand(Block blocks[]) {
-      super( DatanodeProtocol.DataNodeAction.DNA_INVALIDATE );
-      this.blocks = blocks;
-      this.targets = new DatanodeInfo[0][];
-    }
+  /**
+   * Create BlockCommand for block invalidation
+   * @param blocks  blocks to invalidate
+   */
+  public BlockCommand(Block blocks[]) {
+    super(DatanodeProtocol.DataNodeAction.DNA_INVALIDATE);
+    this.blocks = blocks;
+    this.targets = new DatanodeInfo[0][];
+  }
 
 
-    public Block[] getBlocks() {
-        return blocks;
-    }
+  public Block[] getBlocks() {
+    return blocks;
+  }
 
 
-    public DatanodeInfo[][] getTargets() {
-        return targets;
-    }
+  public DatanodeInfo[][] getTargets() {
+    return targets;
+  }
 
 
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (BlockCommand.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new BlockCommand(); }
-         });
-    }
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (BlockCommand.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new BlockCommand(); }
+       });
+  }
 
 
-    public void write(DataOutput out) throws IOException {
-        super.write( out );
-        out.writeInt(blocks.length);
-        for (int i = 0; i < blocks.length; i++) {
-            blocks[i].write(out);
-        }
-        out.writeInt(targets.length);
-        for (int i = 0; i < targets.length; i++) {
-            out.writeInt(targets[i].length);
-            for (int j = 0; j < targets[i].length; j++) {
-                targets[i][j].write(out);
-            }
-        }
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    out.writeInt(blocks.length);
+    for (int i = 0; i < blocks.length; i++) {
+      blocks[i].write(out);
+    }
+    out.writeInt(targets.length);
+    for (int i = 0; i < targets.length; i++) {
+      out.writeInt(targets[i].length);
+      for (int j = 0; j < targets[i].length; j++) {
+        targets[i][j].write(out);
+      }
     }
     }
+  }
 
 
-    public void readFields(DataInput in) throws IOException {
-        super.readFields( in );
-        this.blocks = new Block[in.readInt()];
-        for (int i = 0; i < blocks.length; i++) {
-            blocks[i] = new Block();
-            blocks[i].readFields(in);
-        }
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    this.blocks = new Block[in.readInt()];
+    for (int i = 0; i < blocks.length; i++) {
+      blocks[i] = new Block();
+      blocks[i].readFields(in);
+    }
 
 
-        this.targets = new DatanodeInfo[in.readInt()][];
-        for (int i = 0; i < targets.length; i++) {
-            this.targets[i] = new DatanodeInfo[in.readInt()];
-            for (int j = 0; j < targets[i].length; j++) {
-                targets[i][j] = new DatanodeInfo();
-                targets[i][j].readFields(in);
-            }
-        }
+    this.targets = new DatanodeInfo[in.readInt()][];
+    for (int i = 0; i < targets.length; i++) {
+      this.targets[i] = new DatanodeInfo[in.readInt()];
+      for (int j = 0; j < targets[i].length; j++) {
+        targets[i][j] = new DatanodeInfo();
+        targets[i][j].readFields(in);
+      }
     }
+  }
 }

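Aside (not part of the patch): the write()/readFields() pair above is the standard Writable contract: length-prefix each array, then serialize the elements, with the no-arg constructor registered via WritableFactories used on the receiving side. A minimal round-trip sketch, assuming package access to org.apache.hadoop.dfs and the in-memory buffers from org.apache.hadoop.io:

    // Round-trip sketch only; exercises the serialization shown above.
    DataOutputBuffer out = new DataOutputBuffer();
    BlockCommand cmd = new BlockCommand(new Block[] { new Block() },
                                        new DatanodeInfo[][] { new DatanodeInfo[0] });
    cmd.write(out);                              // arrays go out length-prefixed

    DataInputBuffer in = new DataInputBuffer();
    in.reset(out.getData(), out.getLength());
    BlockCommand copy = new BlockCommand();      // the ctor registered above
    copy.readFields(in);                         // rebuilds blocks and targets
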
+ 14 - 14
src/java/org/apache/hadoop/dfs/ClientProtocol.java

@@ -61,12 +61,12 @@ interface ClientProtocol extends VersionedProtocol {
    * create multi-block files must also use reportWrittenBlock()
    * and addBlock().
    */
-  public LocatedBlock create( String src, 
-                              String clientName, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize
-                              ) throws IOException;
+  public LocatedBlock create(String src, 
+                             String clientName, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize
+                             ) throws IOException;
 
   /**
    * Set replication for an existing file.
@@ -83,9 +83,9 @@ interface ClientProtocol extends VersionedProtocol {
    *         false if file does not exist or is a directory
    * @author shv
    */
-  public boolean setReplication( String src, 
-                                 short replication
-                                 ) throws IOException;
+  public boolean setReplication(String src, 
+                                short replication
+                                ) throws IOException;
 
   /**
    * If the client has not yet called reportWrittenBlock(), it can
@@ -261,7 +261,7 @@ interface ClientProtocol extends VersionedProtocol {
    * <p>
    * Safe mode is entered automatically at name node startup.
    * Safe mode can also be entered manually using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_GET )}.
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
    * <p>
    * At startup the name node accepts data node reports collecting
    * information about block locations.
@@ -277,11 +277,11 @@ interface ClientProtocol extends VersionedProtocol {
    * Then the name node leaves safe mode.
    * <p>
    * If safe mode is turned on manually using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_ENTER )}
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
    * then the name node stays in safe mode until it is manually turned off
-   * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_LEAVE )}.
+   * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
    * Current state of the name node can be verified using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_GET )}
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
    * <h4>Configuration parameters:</h4>
    * <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
    * <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
@@ -304,7 +304,7 @@ interface ClientProtocol extends VersionedProtocol {
    * @throws IOException
    * @author Konstantin Shvachko
    */
-  public boolean setSafeMode( FSConstants.SafeModeAction action ) throws IOException;
+  public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;
 
   /**
    * Tells the namenode to reread the hosts and exclude files.

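The safe-mode javadoc above boils down to three idempotent actions. A hedged sketch of the usual client-side wait (the helper name is ours, not the library's; SAFEMODE_GET only queries the state, it never changes it):

    // Illustrative helper: block until the namenode leaves safe mode.
    static void waitOutSafeMode(ClientProtocol namenode) throws IOException {
      while (namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET)) {
        try {
          Thread.sleep(1000);    // wait out the threshold and the extension
        } catch (InterruptedException ie) {
        }
      }
    }
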
+ 37 - 37
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -216,10 +216,10 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite
-                              ) throws IOException {
-    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite
+                             ) throws IOException {
+    return create(src, overwrite, defaultReplication, defaultBlockSize, null);
   }
 
   /**
@@ -231,11 +231,11 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite,
-                              Progressable progress
-                              ) throws IOException {
-    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite,
+                             Progressable progress
+                             ) throws IOException {
+    return create(src, overwrite, defaultReplication, defaultBlockSize, progress);
   }
 
   /**
@@ -248,11 +248,11 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize
-                              ) throws IOException {
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize
+                             ) throws IOException {
     return create(src, overwrite, replication, blockSize, null);
   }
 
@@ -267,12 +267,12 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize,
-                              Progressable progress
-                              ) throws IOException {
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize,
+                             Progressable progress
+                             ) throws IOException {
     checkOpen();
     OutputStream result = new DFSOutputStream(src, overwrite, 
                                               replication, blockSize, progress);
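
All four create() overloads above funnel into this five-argument form; the shorter ones substitute defaultReplication and defaultBlockSize. A usage sketch, assuming an already-constructed DFSClient named dfs and an illustrative path:

    // Sketch: the two-argument overload, which fills in the defaults.
    OutputStream out = dfs.create(new UTF8("/tmp/example"), true);
    out.write("hello".getBytes("UTF-8"));
    out.close();                           // completes the file at the namenode
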
@@ -360,8 +360,8 @@ class DFSClient implements FSConstants {
    *
    * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
    */
-  public boolean setSafeMode( SafeModeAction action ) throws IOException {
-    return namenode.setSafeMode( action );
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    return namenode.setSafeMode(action);
   }
 
   /**
@@ -405,9 +405,9 @@ class DFSClient implements FSConstants {
   public void lock(UTF8 src, boolean exclusive) throws IOException {
     long start = System.currentTimeMillis();
     boolean hasLock = false;
-    while (! hasLock) {
+    while (!hasLock) {
       hasLock = namenode.obtainLock(src.toString(), clientName, exclusive);
-      if (! hasLock) {
+      if (!hasLock) {
         try {
           Thread.sleep(400);
           if (System.currentTimeMillis() - start > 5000) {
@@ -425,9 +425,9 @@ class DFSClient implements FSConstants {
    */
   public void release(UTF8 src) throws IOException {
     boolean hasReleased = false;
-    while (! hasReleased) {
+    while (!hasReleased) {
       hasReleased = namenode.releaseLock(src.toString(), clientName);
-      if (! hasReleased) {
+      if (!hasReleased) {
         LOG.info("Could not release.  Retrying...");
         try {
           Thread.sleep(2000);
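
lock() and release() above share one shape: poll an idempotent namenode RPC and sleep between attempts. The same shape extracted as a sketch (this helper is ours, not DFSClient's):

    // Retry `call` every sleepMs until it returns true or the deadline
    // passes; mirrors the obtainLock/releaseLock loops above.
    static boolean retryUntil(java.util.concurrent.Callable<Boolean> call,
                              long sleepMs, long deadlineMs) throws Exception {
      long start = System.currentTimeMillis();
      while (System.currentTimeMillis() - start < deadlineMs) {
        if (call.call()) {                 // e.g. namenode.obtainLock(...)
          return true;
        }
        Thread.sleep(sleepMs);             // back off before retrying
      }
      return false;                        // caller chooses how to fail
    }
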
@@ -464,7 +464,7 @@ class DFSClient implements FSConstants {
       while (running) {
         if (System.currentTimeMillis() - lastRenewed > (LEASE_SOFTLIMIT_PERIOD / 2)) {
           try {
-            if( pendingCreates.size() > 0 )
+            if (pendingCreates.size() > 0)
               namenode.renewLease(clientName);
             lastRenewed = System.currentTimeMillis();
           } catch (IOException ie) {
@@ -538,7 +538,7 @@ class DFSClient implements FSConstants {
 
       if (oldBlocks != null) {
         for (int i = 0; i < oldBlocks.length; i++) {
-          if (! oldBlocks[i].equals(newBlocks[i])) {
+          if (!oldBlocks[i].equals(newBlocks[i])) {
             throw new IOException("Blocklist for " + src + " has changed!");
           }
         }
@@ -912,7 +912,7 @@ class DFSClient implements FSConstants {
       deadNodes.add(currentNode);
       DatanodeInfo oldNode = currentNode;
       DatanodeInfo newNode = blockSeekTo(targetPos);
-      if ( !markedDead ) {
+      if (!markedDead) {
         /* remove it from deadNodes. blockSeekTo could have cleared
          * deadNodes and added currentNode again. That's ok. */
         deadNodes.remove(oldNode);
@@ -1037,7 +1037,7 @@ class DFSClient implements FSConstants {
      * filedescriptor that we don't own.
      */
     private void closeBackupStream() throws IOException {
-      if ( backupStream != null ) {
+      if (backupStream != null) {
         OutputStream stream = backupStream;
         backupStream = null;
         stream.close();
@@ -1047,7 +1047,7 @@ class DFSClient implements FSConstants {
      * twice could result in deleting a file that we should not.
      */
     private void deleteBackupFile() {
-      if ( backupFile != null ) {
+      if (backupFile != null) {
         File file = backupFile;
         backupFile = null;
         file.delete();
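
closeBackupStream() and deleteBackupFile() above both clear the field before acting on a local copy, so calling them twice is harmless even if the first call throws midway. The guard, reduced to a sketch (field name and type are illustrative):

    private OutputStream backup;           // illustrative field

    private void closeOnce() throws IOException {
      if (backup != null) {
        OutputStream s = backup;
        backup = null;                     // clear first: a second call sees null
        s.close();                         // may throw, but the field is already clear
      }
    }
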
@@ -1081,8 +1081,8 @@ class DFSClient implements FSConstants {
         }
 
         block = lb.getBlock();
-        if ( block.getNumBytes() < bytesWrittenToBlock ) {
-          block.setNumBytes( bytesWrittenToBlock );
+        if (block.getNumBytes() < bytesWrittenToBlock) {
+          block.setNumBytes(bytesWrittenToBlock);
         }
         DatanodeInfo nodes[] = lb.getLocations();
 
@@ -1270,9 +1270,9 @@ class DFSClient implements FSConstants {
       int workingPos = Math.min(pos, maxPos);
 
       if (workingPos > 0) {
-        if ( backupStream == null ) {
-          throw new IOException( "Trying to write to backupStream " +
-                                 "but it already closed or not open");
+        if (backupStream == null) {
+          throw new IOException("Trying to write to backupStream " +
+                                "but it already closed or not open");
         }
         //
         // To the local block backup, write just the bytes
@@ -1417,7 +1417,7 @@ class DFSClient implements FSConstants {
 
         long localstart = System.currentTimeMillis();
         boolean fileComplete = false;
-        while (! fileComplete) {
+        while (!fileComplete) {
           fileComplete = namenode.complete(src.toString(), clientName.toString());
           if (!fileComplete) {
             try {

+ 2 - 2
src/java/org/apache/hadoop/dfs/DFSFileInfo.java

@@ -52,10 +52,10 @@ class DFSFileInfo implements Writable {
   /**
    * Create DFSFileInfo by file INode
    */
-  public DFSFileInfo( FSDirectory.INode node ) {
+  public DFSFileInfo(FSDirectory.INode node) {
     this.path = new UTF8(node.computeName());
     this.isDir = node.isDir();
-    if( isDir ) {
+    if (isDir) {
       this.len = 0;
       this.contentsLen = node.computeContentsLength();
     } else

+ 3 - 3
src/java/org/apache/hadoop/dfs/DFSck.java

@@ -108,7 +108,7 @@ public class DFSck extends ToolBase {
     URLConnection connection = path.openConnection();
     InputStream stream = connection.getInputStream();
     InputStreamReader input =
-        new InputStreamReader(stream, "UTF-8");
+      new InputStreamReader(stream, "UTF-8");
     try {
       int c = input.read();
       while (c != -1) {
@@ -122,7 +122,7 @@ public class DFSck extends ToolBase {
   }
 
   public static void main(String[] args) throws Exception {
-      int res = new DFSck().doMain(new Configuration(), args);
-      System.exit(res);
+    int res = new DFSck().doMain(new Configuration(), args);
+    System.exit(res);
   }
 }

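The DFSck hunks above stream the namenode's fsck servlet back to the console one character at a time. Expanded into a sketch (the URL is illustrative; the real one is built from the configured namenode web address):

    URL path = new URL("http://namenode:50070/fsck?path=%2F");
    URLConnection connection = path.openConnection();
    InputStreamReader input =
      new InputStreamReader(connection.getInputStream(), "UTF-8");
    try {
      int c = input.read();
      while (c != -1) {
        System.out.print((char) c);
        c = input.read();
      }
    } finally {
      input.close();
    }
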
+ 99 - 99
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -193,19 +193,19 @@ public class DataNode implements FSConstants, Runnable {
    * Create the DataNode given a configuration and an array of dataDirs.
    * 'dataDirs' is where the blocks are stored.
    */
-  DataNode( Configuration conf, 
-            AbstractList<File> dataDirs ) throws IOException {
+  DataNode(Configuration conf, 
+           AbstractList<File> dataDirs) throws IOException {
     try {
-      startDataNode( conf, dataDirs );
+      startDataNode(conf, dataDirs);
     } catch (IOException ie) {
       shutdown();
       throw ie;
     }
   }
 
-  void startDataNode( Configuration conf, 
-                      AbstractList<File> dataDirs
-                      ) throws IOException {
+  void startDataNode(Configuration conf, 
+                     AbstractList<File> dataDirs
+                     ) throws IOException {
     // use configured nameserver & interface to get local hostname
     machineName = DNS.getDefaultHost(
                                      conf.get("dfs.datanode.dns.interface","default"),
@@ -223,14 +223,14 @@ public class DataNode implements FSConstants, Runnable {
     NamespaceInfo nsInfo = handshake();
 
     // read storage info, lock data dirs and transition fs state if necessary
-    StartupOption startOpt = (StartupOption)conf.get( "dfs.datanode.startup", 
-                                                      StartupOption.REGULAR );
+    StartupOption startOpt = (StartupOption)conf.get("dfs.datanode.startup", 
+                                                     StartupOption.REGULAR);
     assert startOpt != null : "Startup option must be set.";
     storage = new DataStorage();
-    storage.recoverTransitionRead( nsInfo, dataDirs, startOpt );
+    storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
 
     // initialize data node internal structure
-    this.data = new FSDataset( storage, conf );
+    this.data = new FSDataset(storage, conf);
 
     // find free port
     ServerSocket ss = null;
@@ -238,7 +238,7 @@ public class DataNode implements FSConstants, Runnable {
     String bindAddress = conf.get("dfs.datanode.bindAddress", "0.0.0.0");
     while (ss == null) {
       try {
-        ss = new ServerSocket(tmpPort,0,InetAddress.getByName(bindAddress));
+        ss = new ServerSocket(tmpPort, 0, InetAddress.getByName(bindAddress));
         LOG.info("Opened server at " + tmpPort);
       } catch (IOException ie) {
         LOG.info("Could not open server at " + tmpPort + ", trying new port");
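
The loop above finds a free port for the data transfer server; the increment of tmpPort sits on a line the hunk elides. Self-contained, the search looks roughly like this (starting port and increment are assumptions):

    int tmpPort = 50010;                   // assumed starting port
    ServerSocket ss = null;
    while (ss == null) {
      try {
        ss = new ServerSocket(tmpPort, 0, InetAddress.getByName("0.0.0.0"));
      } catch (IOException ie) {
        tmpPort++;                         // taken: probe the next port
      }
    }
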
@@ -246,10 +246,10 @@ public class DataNode implements FSConstants, Runnable {
       }
     }
     // construct registration
-    this.dnRegistration = new DatanodeRegistration( 
+    this.dnRegistration = new DatanodeRegistration(
                                                    machineName + ":" + tmpPort, 
                                                    -1,   // info port determined later
-                                                   storage );
+                                                   storage);
 
     this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));
 
@@ -268,9 +268,9 @@ public class DataNode implements FSConstants, Runnable {
     this.infoServer.start();
     this.dnRegistration.infoPort = this.infoServer.getPort();
     // get network location
-    this.networkLoc = conf.get( "dfs.datanode.rack" );
-    if( networkLoc == null )  // exec network script or set the default rack
-      networkLoc = getNetworkLoc( conf );
+    this.networkLoc = conf.get("dfs.datanode.rack");
+    if (networkLoc == null)  // exec network script or set the default rack
+      networkLoc = getNetworkLoc(conf);
     // register datanode
     register();
     datanodeObject = this;
@@ -282,7 +282,7 @@ public class DataNode implements FSConstants, Runnable {
       try {
         nsInfo = namenode.versionRequest();
         break;
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
         try {
           Thread.sleep(1000);
@@ -291,18 +291,18 @@ public class DataNode implements FSConstants, Runnable {
     }
     String errorMsg = null;
     // verify build version
-    if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion() )) {
+    if (!nsInfo.getBuildVersion().equals(Storage.getBuildVersion())) {
       errorMsg = "Incompatible build versions: namenode BV = " 
         + nsInfo.getBuildVersion() + "; datanode BV = "
         + Storage.getBuildVersion();
-      LOG.fatal( errorMsg );
+      LOG.fatal(errorMsg);
       try {
-        namenode.errorReport( dnRegistration,
-                              DatanodeProtocol.NOTIFY, errorMsg );
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+        namenode.errorReport(dnRegistration,
+                             DatanodeProtocol.NOTIFY, errorMsg);
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
       }
-      throw new IOException( errorMsg );
+      throw new IOException(errorMsg);
     }
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
@@ -340,21 +340,21 @@ public class DataNode implements FSConstants, Runnable {
    * @throws IOException
    */
   private void register() throws IOException {
-    while( shouldRun ) {
+    while(shouldRun) {
       try {
         // reset name to machineName. Mainly for web interface.
         dnRegistration.name = machineName + ":" + dnRegistration.getPort();
-        dnRegistration = namenode.register( dnRegistration, networkLoc );
+        dnRegistration = namenode.register(dnRegistration, networkLoc);
         break;
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
         try {
           Thread.sleep(1000);
         } catch (InterruptedException ie) {}
       }
     }
-    if( storage.getStorageID().equals("") ) {
-      storage.setStorageID( dnRegistration.getStorageID());
+    if (storage.getStorageID().equals("")) {
+      storage.setStorageID(dnRegistration.getStorageID());
       storage.writeAll();
     }
   }
@@ -390,12 +390,12 @@ public class DataNode implements FSConstants, Runnable {
     }
   }
 
-  void handleDiskError( String errMsgr ) {
-    LOG.warn( "DataNode is shutting down.\n" + errMsgr );
+  void handleDiskError(String errMsgr) {
+    LOG.warn("DataNode is shutting down.\n" + errMsgr);
     try {
       namenode.errorReport(
                            dnRegistration, DatanodeProtocol.DISK_ERROR, errMsgr);
-    } catch( IOException ignored) {              
+    } catch(IOException ignored) {              
     }
     shutdown();
   }
@@ -438,20 +438,20 @@ public class DataNode implements FSConstants, Runnable {
           // -- Total capacity
           // -- Bytes remaining
           //
-          DatanodeCommand cmd = namenode.sendHeartbeat( dnRegistration, 
-                                                        data.getCapacity(), 
-                                                        data.getRemaining(), 
-                                                        xmitsInProgress,
-                                                        xceiverCount.getValue());
+          DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration, 
+                                                       data.getCapacity(), 
+                                                       data.getRemaining(), 
+                                                       xmitsInProgress,
+                                                       xceiverCount.getValue());
           //LOG.info("Just sent heartbeat, with name " + localName);
           lastHeartbeat = now;
-          if( ! processCommand( cmd ) )
+          if (!processCommand(cmd))
             continue;
         }
 
         // check if there are newly received blocks
         Block [] blockArray=null;
-        synchronized( receivedBlockList ) {
+        synchronized(receivedBlockList) {
           if (receivedBlockList.size() > 0) {
             //
             // Send newly-received blockids to namenode
@@ -459,8 +459,8 @@ public class DataNode implements FSConstants, Runnable {
             blockArray = receivedBlockList.toArray(new Block[receivedBlockList.size()]);
           }
         }
-        if( blockArray != null ) {
-          namenode.blockReceived( dnRegistration, blockArray );
+        if (blockArray != null) {
+          namenode.blockReceived(dnRegistration, blockArray);
           synchronized (receivedBlockList) {
             for(Block b: blockArray) {
               receivedBlockList.remove(b);
@@ -475,9 +475,9 @@ public class DataNode implements FSConstants, Runnable {
           // Get back a list of local block(s) that are obsolete
           // and can be safely GC'ed.
           //
-          DatanodeCommand cmd = namenode.blockReport( dnRegistration,
-                                                      data.getBlockReport());
-          processCommand( cmd );
+          DatanodeCommand cmd = namenode.blockReport(dnRegistration,
+                                                     data.getBlockReport());
+          processCommand(cmd);
           lastBlockReport = now;
         }
 
@@ -486,7 +486,7 @@ public class DataNode implements FSConstants, Runnable {
         // or work arrives, and then iterate again.
         //
         long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
-        synchronized( receivedBlockList ) {
+        synchronized(receivedBlockList) {
           if (waitTime > 0 && receivedBlockList.size() == 0) {
             try {
               receivedBlockList.wait(waitTime);
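
The offer-service loop above multiplexes heartbeats, received-block notifications, and block reports over one thread, sleeping on receivedBlockList so a freshly received block can wake it early. The cadence alone, as a sketch with stubbed namenode calls and placeholder values:

    List<Block> receivedBlockList = new ArrayList<Block>();
    long heartBeatInterval = 3 * 1000L;    // placeholder interval
    long lastHeartbeat = 0;
    while (true) {
      long now = System.currentTimeMillis();
      if (now - lastHeartbeat > heartBeatInterval) {
        // namenode.sendHeartbeat(...) would go here
        lastHeartbeat = now;
      }
      long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
      synchronized (receivedBlockList) {
        if (waitTime > 0 && receivedBlockList.size() == 0) {
          try {
            receivedBlockList.wait(waitTime);   // notify() on a new block wakes us
          } catch (InterruptedException ie) {
          }
        }
      }
    }
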
@@ -497,12 +497,12 @@ public class DataNode implements FSConstants, Runnable {
       } catch(DiskErrorException e) {
         handleDiskError(e.getLocalizedMessage());
         return;
-      } catch( RemoteException re ) {
+      } catch(RemoteException re) {
         String reClass = re.getClassName();
-        if( UnregisteredDatanodeException.class.getName().equals( reClass ) ||
-            DisallowedDatanodeException.class.getName().equals( reClass )) {
-          LOG.warn( "DataNode is shutting down: " + 
-                    StringUtils.stringifyException(re));
+        if (UnregisteredDatanodeException.class.getName().equals(reClass) ||
+            DisallowedDatanodeException.class.getName().equals(reClass)) {
+          LOG.warn("DataNode is shutting down: " + 
+                   StringUtils.stringifyException(re));
           shutdown();
           return;
         }
@@ -519,16 +519,16 @@ public class DataNode implements FSConstants, Runnable {
      * @return true if further processing may be required or false otherwise.
      * @throws IOException
      */
-  private boolean processCommand( DatanodeCommand cmd ) throws IOException {
-    if( cmd == null )
+  private boolean processCommand(DatanodeCommand cmd) throws IOException {
+    if (cmd == null)
       return true;
-    switch( cmd.action ) {
+    switch(cmd.action) {
     case DNA_TRANSFER:
       //
       // Send a copy of a block to another datanode
       //
       BlockCommand bcmd = (BlockCommand)cmd;
-      transferBlocks( bcmd.getBlocks(), bcmd.getTargets() );
+      transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
       break;
     case DNA_INVALIDATE:
       //
@@ -553,21 +553,21 @@ public class DataNode implements FSConstants, Runnable {
       storage.finalizeUpgrade();
       break;
     default:
-      LOG.warn( "Unknown DatanodeCommand action: " + cmd.action);
+      LOG.warn("Unknown DatanodeCommand action: " + cmd.action);
     }
     return true;
   }
 
-  private void transferBlocks(  Block blocks[], 
-                                DatanodeInfo xferTargets[][] 
-                                ) throws IOException {
+  private void transferBlocks( Block blocks[], 
+                               DatanodeInfo xferTargets[][] 
+                               ) throws IOException {
     for (int i = 0; i < blocks.length; i++) {
       if (!data.isValidBlock(blocks[i])) {
         String errStr = "Can't send invalid block " + blocks[i];
         LOG.info(errStr);
-        namenode.errorReport( dnRegistration, 
-                              DatanodeProtocol.INVALID_BLOCK, 
-                              errStr );
+        namenode.errorReport(dnRegistration, 
+                             DatanodeProtocol.INVALID_BLOCK, 
+                             errStr);
         break;
       }
       if (xferTargets[i].length > 0) {
@@ -689,7 +689,7 @@ public class DataNode implements FSConstants, Runnable {
         //
         // Write filelen of -1 if error
         //
-        if (! data.isValidBlock(b)) {
+        if (!data.isValidBlock(b)) {
           out.writeLong(-1);
         } else {
           //
@@ -1130,11 +1130,11 @@ public class DataNode implements FSConstants, Runnable {
   /** Start a single datanode daemon and wait for it to finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
-  static DataNode createDataNode( String args[],
-                                  Configuration conf ) throws IOException {
-    if( conf == null )
+  static DataNode createDataNode(String args[],
+                                 Configuration conf) throws IOException {
+    if (conf == null)
       conf = new Configuration();
-    if( ! parseArguments( args, conf )) {
+    if (!parseArguments(args, conf)) {
       printUsage();
       return null;
     }
@@ -1160,21 +1160,21 @@ public class DataNode implements FSConstants, Runnable {
    * no directory from this directory list can be created.
    * @throws IOException
    */
-  static DataNode makeInstance( String[] dataDirs, Configuration conf )
+  static DataNode makeInstance(String[] dataDirs, Configuration conf)
     throws IOException {
     ArrayList<File> dirs = new ArrayList<File>();
     for (int i = 0; i < dataDirs.length; i++) {
       File data = new File(dataDirs[i]);
       try {
-        DiskChecker.checkDir( data );
+        DiskChecker.checkDir(data);
         dirs.add(data);
-      } catch( DiskErrorException e ) {
-        LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage() );
+      } catch(DiskErrorException e) {
+        LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage());
       }
     }
-    if( dirs.size() > 0 ) 
+    if (dirs.size() > 0) 
       return new DataNode(conf, dirs);
-    LOG.error("All directories in dfs.data.dir are invalid." );
+    LOG.error("All directories in dfs.data.dir are invalid.");
     return null;
   }
 
@@ -1199,45 +1199,45 @@ public class DataNode implements FSConstants, Runnable {
    * @return false if passed arguments are incorrect
    */
   private static boolean parseArguments(String args[], 
-                                        Configuration conf ) {
+                                        Configuration conf) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
     String networkLoc = null;
-    for( int i=0; i < argsLen; i++ ) {
+    for(int i=0; i < argsLen; i++) {
       String cmd = args[i];
-      if( "-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd) ) {
-        if( i==args.length-1 )
+      if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
+        if (i==args.length-1)
           return false;
         networkLoc = args[++i];
-        if( networkLoc.startsWith("-") )
+        if (networkLoc.startsWith("-"))
           return false;
-      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+      } else if ("-rollback".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
-      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+      } else if ("-regular".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
       } else
         return false;
     }
-    if( networkLoc != null )
-      conf.set( "dfs.datanode.rack", NodeBase.normalize( networkLoc ));
-    conf.setObject( "dfs.datanode.startup", startOpt );
+    if (networkLoc != null)
+      conf.set("dfs.datanode.rack", NodeBase.normalize(networkLoc));
+    conf.setObject("dfs.datanode.startup", startOpt);
     return true;
   }
 
   /* Get the network location by running a script configured in conf */
-  private static String getNetworkLoc( Configuration conf ) 
+  private static String getNetworkLoc(Configuration conf) 
     throws IOException {
-    String locScript = conf.get("dfs.network.script" );
-    if( locScript == null ) 
+    String locScript = conf.get("dfs.network.script");
+    if (locScript == null) 
       return NetworkTopology.DEFAULT_RACK;
 
-    LOG.info( "Starting to run script to get datanode network location");
-    Process p = Runtime.getRuntime().exec( locScript );
+    LOG.info("Starting to run script to get datanode network location");
+    Process p = Runtime.getRuntime().exec(locScript);
     StringBuffer networkLoc = new StringBuffer();
     final BufferedReader inR = new BufferedReader(
-                                                  new InputStreamReader(p.getInputStream() ) );
+                                                  new InputStreamReader(p.getInputStream()));
     final BufferedReader errR = new BufferedReader(
-                                                   new InputStreamReader( p.getErrorStream() ) );
+                                                   new InputStreamReader(p.getErrorStream()));
 
     // read & log any error messages from the running script
     Thread errThread = new Thread() {
@@ -1248,7 +1248,7 @@ public class DataNode implements FSConstants, Runnable {
               LOG.warn("Network script error: "+errLine);
               errLine = errR.readLine();
             }
-          } catch( IOException e) {
+          } catch(IOException e) {
 
           }
         }
@@ -1258,32 +1258,32 @@ public class DataNode implements FSConstants, Runnable {
 
       // fetch output from the process
       String line = inR.readLine();
-      while( line != null ) {
-        networkLoc.append( line );
+      while(line != null) {
+        networkLoc.append(line);
         line = inR.readLine();
       }
       try {
         // wait for the process to finish
         int returnVal = p.waitFor();
         // check the exit code
-        if( returnVal != 0 ) {
+        if (returnVal != 0) {
           throw new IOException("Process exits with nonzero status: "+locScript);
         }
       } catch (InterruptedException e) {
-        throw new IOException( e.getMessage() );
+        throw new IOException(e.getMessage());
       } finally {
         try {
           // make sure that the error thread exits
           errThread.join();
         } catch (InterruptedException je) {
-          LOG.warn( StringUtils.stringifyException(je));
+          LOG.warn(StringUtils.stringifyException(je));
         }
       }
     } finally {
       // close in & error streams
       try {
         inR.close();
-      } catch ( IOException ine ) {
+      } catch (IOException ine) {
         throw ine;
       } finally {
         errR.close();
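
getNetworkLoc() above shells out to the configured dfs.network.script, draining stderr on its own thread so the child process cannot block on a full pipe, then joins everything before returning. The core of that pattern, sketched (script path illustrative; IOException and InterruptedException are left to the caller):

    Process p = Runtime.getRuntime().exec("/path/to/rack-script");
    final BufferedReader errR =
      new BufferedReader(new InputStreamReader(p.getErrorStream()));
    Thread errThread = new Thread() {
      public void run() {
        try {
          for (String e = errR.readLine(); e != null; e = errR.readLine()) {
            System.err.println("script error: " + e);   // LOG.warn in the real code
          }
        } catch (IOException ioe) {
        }
      }
    };
    errThread.start();

    BufferedReader inR =
      new BufferedReader(new InputStreamReader(p.getInputStream()));
    StringBuffer loc = new StringBuffer();
    for (String line = inR.readLine(); line != null; line = inR.readLine()) {
      loc.append(line);
    }
    if (p.waitFor() != 0) {
      throw new IOException("script exited with nonzero status");
    }
    errThread.join();
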
@@ -1297,11 +1297,11 @@ public class DataNode implements FSConstants, Runnable {
    */
   public static void main(String args[]) {
     try {
-      DataNode datanode = createDataNode( args, null );
-      if( datanode != null )
+      DataNode datanode = createDataNode(args, null);
+      if (datanode != null)
         datanode.join();
-    } catch ( Throwable e ) {
-      LOG.error( StringUtils.stringifyException( e ) );
+    } catch (Throwable e) {
+      LOG.error(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }

+ 156 - 156
src/java/org/apache/hadoop/dfs/DataStorage.java

@@ -29,17 +29,17 @@ class DataStorage extends Storage {
   private String storageID;
 
   DataStorage() {
-    super( NodeType.DATA_NODE );
+    super(NodeType.DATA_NODE);
     storageID = "";
   }
 
-  DataStorage( int nsID, long cT, String strgID ) {
-    super( NodeType.DATA_NODE, nsID, cT );
+  DataStorage(int nsID, long cT, String strgID) {
+    super(NodeType.DATA_NODE, nsID, cT);
     this.storageID = strgID;
   }
 
-  DataStorage( StorageInfo storageInfo, String strgID ) {
-    super( NodeType.DATA_NODE, storageInfo );
+  DataStorage(StorageInfo storageInfo, String strgID) {
+    super(NodeType.DATA_NODE, storageInfo);
     this.storageID = strgID;
   }
 
@@ -47,7 +47,7 @@ class DataStorage extends Storage {
     return storageID;
   }
 
-  void setStorageID( String newStorageID ) {
+  void setStorageID(String newStorageID) {
     this.storageID = newStorageID;
   }
 
@@ -62,10 +62,10 @@ class DataStorage extends Storage {
    * @param startOpt startup option
    * @throws IOException
    */
-  void recoverTransitionRead( NamespaceInfo nsInfo,
-                              Collection<File> dataDirs,
-                              StartupOption startOpt
-                              ) throws IOException {
+  void recoverTransitionRead(NamespaceInfo nsInfo,
+                             Collection<File> dataDirs,
+                             StartupOption startOpt
+                             ) throws IOException {
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
 
@@ -73,53 +73,53 @@ class DataStorage extends Storage {
     // check whether all is consistent before transitioning.
     // Format and recover.
     this.storageID = "";
-    this.storageDirs = new ArrayList<StorageDirectory>( dataDirs.size() );
-    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>( dataDirs.size() );
-    for( Iterator<File> it = dataDirs.iterator(); it.hasNext(); ) {
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
+    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
+    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
-      StorageDirectory sd = new StorageDirectory( dataDir );
+      StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       try {
-        curState = sd.analyzeStorage( startOpt );
+        curState = sd.analyzeStorage(startOpt);
         // sd is locked but not opened
-        switch( curState ) {
+        switch(curState) {
         case NORMAL:
           break;
         case NON_EXISTENT:
           // ignore this storage
-          LOG.info( "Storage directory " + dataDir + " does not exist." );
+          LOG.info("Storage directory " + dataDir + " does not exist.");
           it.remove();
           continue;
         case CONVERT:
-          convertLayout( sd, nsInfo );
+          convertLayout(sd, nsInfo);
           break;
         case NOT_FORMATTED: // format
-          LOG.info( "Storage directory " + dataDir + " is not formatted." );
-          LOG.info( "Formatting ..." );
-          format( sd, nsInfo );
+          LOG.info("Storage directory " + dataDir + " is not formatted.");
+          LOG.info("Formatting ...");
+          format(sd, nsInfo);
           break;
         default:  // recovery part is common
-          sd.doRecover( curState );
+          sd.doRecover(curState);
         }
       } catch (IOException ioe) {
         sd.unlock();
         throw ioe;
       }
       // add to the storage list
-      addStorageDir( sd );
-      dataDirStates.add( curState );
+      addStorageDir(sd);
+      dataDirStates.add(curState);
     }
 
-    if( dataDirs.size() == 0 )  // none of the data dirs exist
-      throw new IOException( 
-                            "All specified directories are not accessible or do not exist." );
+    if (dataDirs.size() == 0)  // none of the data dirs exist
+      throw new IOException(
+                            "All specified directories are not accessible or do not exist.");
 
     // 2. Do transitions
     // Each storage directory is treated individually.
     // During startup some of them can upgrade or rollback
     // while others could be up-to-date for the regular startup.
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      doTransition( getStorageDir( idx ), nsInfo, startOpt );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      doTransition(getStorageDir(idx), nsInfo, startOpt);
       assert this.getLayoutVersion() == nsInfo.getLayoutVersion() :
         "Data-node and name-node layout versions must be the same.";
       assert this.getCTime() == nsInfo.getCTime() :
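
recoverTransitionRead() above runs a small per-directory state machine: analyze the directory, handle NORMAL, NON_EXISTENT, CONVERT, NOT_FORMATTED, or recovery, and only afterwards apply the upgrade or rollback transition. Its dispatch shape, sketched with a stand-in enum (names ours, real handlers stubbed in comments):

    enum State { NORMAL, NON_EXISTENT, CONVERT, NOT_FORMATTED, RECOVER }

    void prepare(File dir, State curState) throws IOException {
      switch (curState) {
      case NORMAL:
        break;                             // usable as-is
      case NON_EXISTENT:
        return;                            // drop this directory from the list
      case CONVERT:
        break;                             // convertLayout(...) in the real code
      case NOT_FORMATTED:
        break;                             // format(...) lays down a fresh layout
      default:
        break;                             // doRecover(...) undoes a partial transition
      }
    }
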
@@ -130,7 +130,7 @@ class DataStorage extends Storage {
     this.writeAll();
   }
 
-  void format( StorageDirectory sd, NamespaceInfo nsInfo ) throws IOException {
+  void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
     sd.clearDirectory(); // create directory
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.namespaceID = nsInfo.getNamespaceID();
@@ -139,42 +139,42 @@ class DataStorage extends Storage {
     sd.write();
   }
 
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.setFields( props, sd );
-    props.setProperty( "storageID", storageID );
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.setFields(props, sd);
+    props.setProperty("storageID", storageID);
   }
 
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.getFields( props, sd );
-    String ssid = props.getProperty( "storageID" );
-    if( ssid == null ||
-        ! ("".equals( storageID ) || "".equals( ssid ) ||
-           storageID.equals( ssid )))
-      throw new InconsistentFSStateException( sd.root,
-                                              "has incompatible storage Id." );
-    if( "".equals( storageID ) ) // update id only if it was empty
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.getFields(props, sd);
+    String ssid = props.getProperty("storageID");
+    if (ssid == null ||
+        !("".equals(storageID) || "".equals(ssid) ||
+          storageID.equals(ssid)))
+      throw new InconsistentFSStateException(sd.root,
+                                             "has incompatible storage Id.");
+    if ("".equals(storageID)) // update id only if it was empty
       storageID = ssid;
   }
 
-  boolean isConversionNeeded( StorageDirectory sd ) throws IOException {
-    File oldF = new File( sd.root, "storage" );
-    if( ! oldF.exists() )
+  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+    File oldF = new File(sd.root, "storage");
+    if (!oldF.exists())
       return false;
     // check consistency of the old storage
-    File oldDataDir = new File( sd.root, "data" );
-    if( ! oldDataDir.exists() ) 
-      throw new InconsistentFSStateException( sd.root,
-                                              "Old layout block directory " + oldDataDir + " is missing" ); 
-    if( ! oldDataDir.isDirectory() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldDataDir + " is not a directory." );
-    if( ! oldDataDir.canWrite() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldDataDir + " is not writable." );
+    File oldDataDir = new File(sd.root, "data");
+    if (!oldDataDir.exists()) 
+      throw new InconsistentFSStateException(sd.root,
+                                             "Old layout block directory " + oldDataDir + " is missing"); 
+    if (!oldDataDir.isDirectory())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldDataDir + " is not a directory.");
+    if (!oldDataDir.canWrite())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldDataDir + " is not writable.");
     return true;
   }
 
@@ -185,44 +185,44 @@ class DataStorage extends Storage {
    * @param nsInfo namespace information
    * @throws IOException
    */
-  private void convertLayout( StorageDirectory sd,
-                              NamespaceInfo nsInfo 
-                              ) throws IOException {
+  private void convertLayout(StorageDirectory sd,
+                             NamespaceInfo nsInfo 
+                             ) throws IOException {
     assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
       "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
-    File oldF = new File( sd.root, "storage" );
-    File oldDataDir = new File( sd.root, "data" );
+    File oldF = new File(sd.root, "storage");
+    File oldDataDir = new File(sd.root, "data");
     assert oldF.exists() : "Old datanode layout \"storage\" file is missing";
     assert oldDataDir.exists() : "Old layout block directory \"data\" is missing";
-    LOG.info( "Old layout version file " + oldF
-              + " is found. New layout version is "
-              + FSConstants.LAYOUT_VERSION );
-    LOG.info( "Converting ..." );
+    LOG.info("Old layout version file " + oldF
+             + " is found. New layout version is "
+             + FSConstants.LAYOUT_VERSION);
+    LOG.info("Converting ...");
 
     // Lock and Read old storage file
-    RandomAccessFile oldFile = new RandomAccessFile( oldF, "rws" );
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
    if (oldFile == null)
-      throw new IOException( "Cannot read file: " + oldF );
+      throw new IOException("Cannot read file: " + oldF);
    FileLock oldLock = oldFile.getChannel().tryLock();
    if (oldLock == null)
-      throw new IOException( "Cannot lock file: " + oldF );
+      throw new IOException("Cannot lock file: " + oldF);
    try {
      oldFile.seek(0);
      int odlVersion = oldFile.readInt();
-      if( odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION )
-        throw new IncorrectVersionException( odlVersion, "file " + oldF,
-                                             LAST_PRE_UPGRADE_LAYOUT_VERSION );
-      String odlStorageID = org.apache.hadoop.io.UTF8.readString( oldFile );
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        throw new IncorrectVersionException(odlVersion, "file " + oldF,
+                                            LAST_PRE_UPGRADE_LAYOUT_VERSION);
+      String odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
  
      // check new storage
      File newDataDir = sd.getCurrentDir();
      File versionF = sd.getVersionFile();
-      if( versionF.exists() )
-        throw new IOException( "Version file already exists: " + versionF );
-      if( newDataDir.exists() ) // somebody created current dir manually
-        deleteDir( newDataDir );
+      if (versionF.exists())
+        throw new IOException("Version file already exists: " + versionF);
+      if (newDataDir.exists()) // somebody created current dir manually
+        deleteDir(newDataDir);
      // Write new layout
-      rename( oldDataDir, newDataDir );
+      rename(oldDataDir, newDataDir);
  
      this.layoutVersion = FSConstants.LAYOUT_VERSION;
      this.namespaceID = nsInfo.getNamespaceID();
@@ -235,8 +235,8 @@ class DataStorage extends Storage {
      oldFile.close();
    }
    // move old storage file into current dir
-    rename( oldF, new File( sd.getCurrentDir(), "storage" ));
-    LOG.info( "Conversion of " + oldF + " is complete." );
+    rename(oldF, new File(sd.getCurrentDir(), "storage"));
+    LOG.info("Conversion of " + oldF + " is complete.");
  }

  /**
@@ -252,26 +252,26 @@ class DataStorage extends Storage {
   * @param startOpt  startup option
   * @throws IOException
   */
-  private void doTransition(  StorageDirectory sd, 
-                              NamespaceInfo nsInfo, 
-                              StartupOption startOpt
-                              ) throws IOException {
-    if( startOpt == StartupOption.ROLLBACK )
-      doRollback( sd, nsInfo ); // rollback if applicable
+  private void doTransition( StorageDirectory sd, 
+                             NamespaceInfo nsInfo, 
+                             StartupOption startOpt
+                             ) throws IOException {
+    if (startOpt == StartupOption.ROLLBACK)
+      doRollback(sd, nsInfo); // rollback if applicable
    sd.read();
    assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
      "Future version is not allowed";
-    if( getNamespaceID() != nsInfo.getNamespaceID() )
-      throw new IOException( 
+    if (getNamespaceID() != nsInfo.getNamespaceID())
+      throw new IOException(
                            "Incompatible namespaceIDs in " + sd.root.getCanonicalPath()
                            + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
-                            + "; datanode namespaceID = " + getNamespaceID() );
-    if( this.layoutVersion == FSConstants.LAYOUT_VERSION 
-        && this.cTime == nsInfo.getCTime() )
+                            + "; datanode namespaceID = " + getNamespaceID());
+    if (this.layoutVersion == FSConstants.LAYOUT_VERSION 
+        && this.cTime == nsInfo.getCTime())
      return; // regular startup
-    if( this.layoutVersion > FSConstants.LAYOUT_VERSION
-        || this.cTime < nsInfo.getCTime() ) {
-      doUpgrade( sd, nsInfo );  // upgrade
+    if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+        || this.cTime < nsInfo.getCTime()) {
+      doUpgrade(sd, nsInfo);  // upgrade
      return;
    }
    // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
@@ -280,7 +280,7 @@ class DataStorage extends Storage {
                           + " CTime = " + this.getCTime() 
                           + " CTime = " + this.getCTime() 
                           + " is newer than the namespace state: LV = "
                           + " is newer than the namespace state: LV = "
                           + nsInfo.getLayoutVersion() 
                           + nsInfo.getLayoutVersion() 
-                          + " CTime = " + nsInfo.getCTime() );
+                          + " CTime = " + nsInfo.getCTime());
  }

  /**
@@ -290,26 +290,26 @@ class DataStorage extends Storage {
   * @param sd  storage directory
   * @throws IOException
   */
-  void doUpgrade( StorageDirectory sd,
-                  NamespaceInfo nsInfo
-                  ) throws IOException {
-    LOG.info( "Upgrading storage directory " + sd.root 
-              + ".\n   old LV = " + this.getLayoutVersion()
-              + "; old CTime = " + this.getCTime()
-              + ".\n   new LV = " + nsInfo.getLayoutVersion()
-              + "; new CTime = " + nsInfo.getCTime() );
+  void doUpgrade(StorageDirectory sd,
+                 NamespaceInfo nsInfo
+                 ) throws IOException {
+    LOG.info("Upgrading storage directory " + sd.root 
+             + ".\n   old LV = " + this.getLayoutVersion()
+             + "; old CTime = " + this.getCTime()
+             + ".\n   new LV = " + nsInfo.getLayoutVersion()
+             + "; new CTime = " + nsInfo.getCTime());
    File curDir = sd.getCurrentDir();
    File prevDir = sd.getPreviousDir();
    assert curDir.exists() : "Current directory must exist.";
    // delete previous dir before upgrading
-    if( prevDir.exists() )
-      deleteDir( prevDir );
+    if (prevDir.exists())
+      deleteDir(prevDir);
    File tmpDir = sd.getPreviousTmp();
-    assert ! tmpDir.exists() : "previous.tmp directory must not exist.";
+    assert !tmpDir.exists() : "previous.tmp directory must not exist.";
    // rename current to tmp
-    rename( curDir, tmpDir );
+    rename(curDir, tmpDir);
    // hardlink blocks
-    linkBlocks( tmpDir, curDir );
+    linkBlocks(tmpDir, curDir);
    // write version file
    this.layoutVersion = FSConstants.LAYOUT_VERSION;
    assert this.namespaceID == nsInfo.getNamespaceID() :
@@ -317,69 +317,69 @@ class DataStorage extends Storage {
    this.cTime = nsInfo.getCTime();
    sd.write();
    // rename tmp to previous
-    rename( tmpDir, prevDir );
-    LOG.info( "Upgrade of " + sd.root + " is complete." );
+    rename(tmpDir, prevDir);
+    LOG.info("Upgrade of " + sd.root + " is complete.");
  }

-  void doRollback(  StorageDirectory sd,
-                    NamespaceInfo nsInfo
-                    ) throws IOException {
+  void doRollback( StorageDirectory sd,
+                   NamespaceInfo nsInfo
+                   ) throws IOException {
    File prevDir = sd.getPreviousDir();
    // regular startup if previous dir does not exist
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
      return;
    DataStorage prevInfo = new DataStorage();
-    StorageDirectory prevSD = prevInfo.new StorageDirectory( sd.root );
-    prevSD.read( prevSD.getPreviousVersionFile() );
+    StorageDirectory prevSD = prevInfo.new StorageDirectory(sd.root);
+    prevSD.read(prevSD.getPreviousVersionFile());

    // We allow rollback to a state, which is either consistent with
    // the namespace state or can be further upgraded to it.
-    if( ! ( prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
-            && prevInfo.getCTime() <= nsInfo.getCTime() ))  // cannot rollback
-      throw new InconsistentFSStateException( prevSD.root,
-                                              "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
-                                              + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
-                                              + " is newer than the namespace state: LV = "
-                                              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime() );
-    LOG.info( "Rolling back storage directory " + sd.root 
-              + ".\n   target LV = " + nsInfo.getLayoutVersion()
-              + "; target CTime = " + nsInfo.getCTime() );
+    if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
+          && prevInfo.getCTime() <= nsInfo.getCTime()))  // cannot rollback
+      throw new InconsistentFSStateException(prevSD.root,
+                                             "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
+                                             + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
+                                             + " is newer than the namespace state: LV = "
+                                             + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+    LOG.info("Rolling back storage directory " + sd.root 
+             + ".\n   target LV = " + nsInfo.getLayoutVersion()
+             + "; target CTime = " + nsInfo.getCTime());
    File tmpDir = sd.getRemovedTmp();
-    assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
+    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
    // rename current to tmp
    File curDir = sd.getCurrentDir();
    assert curDir.exists() : "Current directory must exist.";
-    rename( curDir, tmpDir );
+    rename(curDir, tmpDir);
    // rename previous to current
-    rename( prevDir, curDir );
+    rename(prevDir, curDir);
    // delete tmp dir
-    deleteDir( tmpDir );
-    LOG.info( "Rollback of " + sd.root + " is complete." );
+    deleteDir(tmpDir);
+    LOG.info("Rollback of " + sd.root + " is complete.");
  }

-  void doFinalize( StorageDirectory sd ) throws IOException {
+  void doFinalize(StorageDirectory sd) throws IOException {
    File prevDir = sd.getPreviousDir();
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
      return; // already discarded
    final String dataDirPath = sd.root.getCanonicalPath();
-    LOG.info( "Finalizing upgrade for storage directory " 
-              + dataDirPath 
-              + ".\n   cur LV = " + this.getLayoutVersion()
-              + "; cur CTime = " + this.getCTime() );
+    LOG.info("Finalizing upgrade for storage directory " 
+             + dataDirPath 
+             + ".\n   cur LV = " + this.getLayoutVersion()
+             + "; cur CTime = " + this.getCTime());
    assert sd.getCurrentDir().exists() : "Current directory must exist.";
    final File tmpDir = sd.getFinalizedTmp();
    // rename previous to tmp
-    rename( prevDir, tmpDir );
+    rename(prevDir, tmpDir);

    // delete tmp dir in a separate thread
-    new Daemon( new Runnable() {
+    new Daemon(new Runnable() {
        public void run() {
          try {
-            deleteDir( tmpDir );
-          } catch( IOException ex ) {
-            LOG.error( "Finalize upgrade for " + dataDirPath + " failed.", ex );
+            deleteDir(tmpDir);
+          } catch(IOException ex) {
+            LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
          }
-          LOG.info( "Finalize upgrade for " + dataDirPath + " is complete." );
+          LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
        }
        public String toString() { return "Finalize " + dataDirPath; }
      }).start();
@@ -387,26 +387,26 @@ class DataStorage extends Storage {
  
  void finalizeUpgrade() throws IOException {
    for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
-      doFinalize( it.next() );
+      doFinalize(it.next());
    }
  }
  
-  static void linkBlocks( File from, File to ) throws IOException {
-    if( ! from.isDirectory() ) {
-      HardLink.createHardLink( from, to );
+  static void linkBlocks(File from, File to) throws IOException {
+    if (!from.isDirectory()) {
+      HardLink.createHardLink(from, to);
      return;
    }
    // from is a directory
-    if( ! to.mkdir() )
-      throw new IOException("Cannot create directory " + to );
-    String[] blockNames = from.list( new java.io.FilenameFilter() {
+    if (!to.mkdir())
+      throw new IOException("Cannot create directory " + to);
+    String[] blockNames = from.list(new java.io.FilenameFilter() {
        public boolean accept(File dir, String name) {
-          return name.startsWith( BLOCK_SUBDIR_PREFIX ) 
-            || name.startsWith( BLOCK_FILE_PREFIX );
+          return name.startsWith(BLOCK_SUBDIR_PREFIX) 
+            || name.startsWith(BLOCK_FILE_PREFIX);
        }
      });
    
-    for( int i = 0; i < blockNames.length; i++ )
-      linkBlocks( new File(from, blockNames[i]), new File(to, blockNames[i]) );
+    for(int i = 0; i < blockNames.length; i++)
+      linkBlocks(new File(from, blockNames[i]), new File(to, blockNames[i]));
  }
}
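
The doUpgrade()/doRollback() hunks above implement an atomic layout change with nothing but directory renames and hard links. The following is a minimal standalone sketch of that sequence, assuming the directory names from the diff (current, previous.tmp, previous); the UpgradeSketch class and its linkAll() helper are illustrative, not Hadoop APIs, and java.nio.file is used here only for brevity.

import java.io.File;
import java.io.IOException;
import java.nio.file.Files;

class UpgradeSketch {
  // Sketch of the doUpgrade() sequence above; helper names are hypothetical.
  static void upgrade(File storageRoot) throws IOException {
    File current = new File(storageRoot, "current");
    File previousTmp = new File(storageRoot, "previous.tmp");
    File previous = new File(storageRoot, "previous");

    // 1. Move the live tree aside; a crash here leaves previous.tmp behind,
    //    which recovery code can detect and either finish or undo.
    if (!current.renameTo(previousTmp))
      throw new IOException("Cannot rename " + current + " to " + previousTmp);

    // 2. Hard-link the block files back into a fresh current/, so old and
    //    new layouts share the same on-disk data without copying.
    if (!current.mkdir())
      throw new IOException("Cannot create " + current);
    linkAll(previousTmp, current);

    // 3. Commit: only once current/ is complete does previous.tmp become
    //    previous, the directory doRollback() looks for.
    if (!previousTmp.renameTo(previous))
      throw new IOException("Cannot rename " + previousTmp + " to " + previous);
  }

  // Stand-in for DataStorage.linkBlocks(): recurse, hard-linking plain files.
  static void linkAll(File from, File to) throws IOException {
    File[] entries = from.listFiles();
    if (entries == null)
      return;
    for (File f : entries) {
      File target = new File(to, f.getName());
      if (f.isDirectory()) {
        if (!target.mkdir())
          throw new IOException("Cannot create " + target);
        linkAll(f, target);
      } else {
        Files.createLink(target.toPath(), f.toPath());
      }
    }
  }
}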

+ 23 - 23
src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java

@@ -60,8 +60,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
  /** DatanodeDescriptor constructor
   * @param nodeID id of the data node
   */
-  public DatanodeDescriptor( DatanodeID nodeID ) {
-    this( nodeID, 0L, 0L, 0 );
+  public DatanodeDescriptor(DatanodeID nodeID) {
+    this(nodeID, 0L, 0L, 0);
  }

  /** DatanodeDescriptor constructor
@@ -69,9 +69,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
   * @param nodeID id of the data node
   * @param networkLocation location of the data node in network
   */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             String networkLocation ) {
-    this( nodeID, networkLocation, null );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            String networkLocation) {
+    this(nodeID, networkLocation, null);
  }
  
  /** DatanodeDescriptor constructor
@@ -80,10 +80,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
   * @param networkLocation location of the data node in network
   * @param hostName it could be different from host specified for DatanodeID
   */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             String networkLocation,
-                             String hostName ) {
-    this( nodeID, networkLocation, hostName, 0L, 0L, 0 );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            String networkLocation,
+                            String hostName) {
+    this(nodeID, networkLocation, hostName, 0L, 0L, 0);
  }
  
  /** DatanodeDescriptor constructor
@@ -93,11 +93,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
   * @param remaining remaing capacity of the data node
   * @param xceiverCount # of data transfers at the data node
   */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             long capacity, 
-                             long remaining,
-                             int xceiverCount ) {
-    super( nodeID );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            long capacity, 
+                            long remaining,
+                            int xceiverCount) {
+    super(nodeID);
    updateHeartbeat(capacity, remaining, xceiverCount);
    initWorkLists();
  }
@@ -110,14 +110,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   * @param remaining remaing capacity of the data node
   * @param xceiverCount # of data transfers at the data node
   */
-  public DatanodeDescriptor( DatanodeID nodeID,
-                             String networkLocation,
-                             String hostName,
-                             long capacity, 
-                             long remaining,
-                             int xceiverCount ) {
-    super( nodeID, networkLocation, hostName );
-    updateHeartbeat( capacity, remaining, xceiverCount);
+  public DatanodeDescriptor(DatanodeID nodeID,
+                            String networkLocation,
+                            String hostName,
+                            long capacity, 
+                            long remaining,
+                            int xceiverCount) {
+    super(nodeID, networkLocation, hostName);
+    updateHeartbeat(capacity, remaining, xceiverCount);
    initWorkLists();
  }

@@ -169,7 +169,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
  }
  
  Block getBlock(long blockId) {
-    return blocks.get( new Block(blockId, 0) );
+    return blocks.get(new Block(blockId, 0));
  }
  
  Block getBlock(Block b) {
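
A hedged usage sketch for the telescoping constructors above: callers supply only what they know and the shorter forms delegate to the full one. The (nodeID, networkLocation) form comes from this hunk; the host name, storage ID, ports, and rack are invented.

class DescriptorSketch {
  static DatanodeDescriptor example() {
    // host name, storage ID, ports and rack are all illustrative values
    DatanodeID id = new DatanodeID("dn1.example.com:50010", "DS-42", 50075);
    return new DatanodeDescriptor(id, "/rack1");
  }
}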

+ 7 - 7
src/java/org/apache/hadoop/dfs/DatanodeID.java

@@ -24,7 +24,7 @@ public class DatanodeID implements WritableComparable {
   * DatanodeID default constructor
   */
  public DatanodeID() {
-    this( new String(), new String(), -1 );
+    this(new String(), new String(), -1);
  }

  /**
@@ -32,8 +32,8 @@ public class DatanodeID implements WritableComparable {
   * 
   * @param from
   */
-  public DatanodeID( DatanodeID from ) {
-    this( from.getName(), from.getStorageID(), from.getInfoPort() );
+  public DatanodeID(DatanodeID from) {
+    this(from.getName(), from.getStorageID(), from.getInfoPort());
  }
  
  /**
@@ -42,7 +42,7 @@ public class DatanodeID implements WritableComparable {
   * @param nodeName (hostname:portNumber) 
   * @param storageID data storage ID
   */
-  public DatanodeID( String nodeName, String storageID, int infoPort ) {
+  public DatanodeID(String nodeName, String storageID, int infoPort) {
    this.name = nodeName;
    this.storageID = storageID;
    this.infoPort = infoPort;
@@ -90,13 +90,13 @@ public class DatanodeID implements WritableComparable {
  
  public int getPort() {
    int colon = name.indexOf(":");
-    if ( colon < 0 ) {
+    if (colon < 0) {
      return 50010; // default port.
    }
    return Integer.parseInt(name.substring(colon+1));
  }

-  public boolean equals( Object to ) {
+  public boolean equals(Object to) {
    return (name.equals(((DatanodeID)to).getName()) &&
            storageID.equals(((DatanodeID)to).getStorageID()));
  }
@@ -113,7 +113,7 @@ public class DatanodeID implements WritableComparable {
   * Update fields when a new registration request comes in.
   * Note that this does not update storageID.
   */
-  void updateRegInfo( DatanodeID nodeReg ) {
+  void updateRegInfo(DatanodeID nodeReg) {
    name = nodeReg.getName();
    infoPort = nodeReg.getInfoPort();
    // update any more fields added in future.
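
Per the equals() and getPort() hunks above, a DatanodeID's identity is (name, storageID) only, and the port is parsed from the "host:port" name with 50010 as the fallback. A small hedged sketch; all values are invented:

class DatanodeIDSketch {
  public static void main(String[] args) {
    DatanodeID a = new DatanodeID("node1.example.com:50010", "DS-123", 50075);
    DatanodeID b = new DatanodeID("node1.example.com:50010", "DS-123", 50080);
    // equal despite different infoPorts: only name and storageID count
    System.out.println(a.equals(b));   // true
    System.out.println(a.getPort());   // 50010, parsed from the name suffix
  }
}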

+ 16 - 16
src/java/org/apache/hadoop/dfs/DatanodeInfo.java

@@ -63,8 +63,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
    adminState = null;
  }
  
-  DatanodeInfo( DatanodeInfo from ) {
-    super( from );
+  DatanodeInfo(DatanodeInfo from) {
+    super(from);
    this.capacity = from.getCapacity();
    this.remaining = from.getRemaining();
    this.lastUpdate = from.getLastUpdate();
@@ -74,8 +74,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
    this.hostName = from.hostName;
  }

-  DatanodeInfo( DatanodeID nodeID ) {
-    super( nodeID );
+  DatanodeInfo(DatanodeID nodeID) {
+    super(nodeID);
    this.capacity = 0L;
    this.remaining = 0L;
    this.lastUpdate = 0L;
@@ -83,7 +83,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
    this.adminState = null;    
  }
  
-  DatanodeInfo( DatanodeID nodeID, String location, String hostName ) {
+  DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
    this(nodeID);
    this.location = location;
    this.hostName = hostName;
@@ -135,10 +135,10 @@ public class DatanodeInfo extends DatanodeID implements Node {

  
  public String getHostName() {
-    return ( hostName == null || hostName.length()==0 ) ? getHost() : hostName;
+    return (hostName == null || hostName.length()==0) ? getHost() : hostName;
  }
  
-  public void setHostName( String host ) {
+  public void setHostName(String host) {
    hostName = host;
  }
  
@@ -149,7 +149,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
    long r = getRemaining();
    long u = c - r;
    buffer.append("Name: "+name+"\n");
-    if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
+    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
      buffer.append("Rack: "+location+"\n");
    }
    if (isDecommissioned()) {
@@ -161,7 +161,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
    }
    buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
    buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
-    buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
+    buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%"+"\n");
    buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
    return buffer.toString();
  }
@@ -173,7 +173,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
    long r = getRemaining();
    long u = c - r;
    buffer.append(name);
-    if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
+    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
      buffer.append(" "+location);
    }
    if (isDecommissioned()) {
@@ -185,7 +185,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
    }
    buffer.append(" " + c + "(" + FsShell.byteDesc(c)+")");
    buffer.append(" " + u + "(" + FsShell.byteDesc(u)+")");
-    buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100,2)+"%");
+    buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%");
    buffer.append(" " + new Date(lastUpdate));
    return buffer.toString();
  }
@@ -260,13 +260,13 @@ public class DatanodeInfo extends DatanodeID implements Node {

  /** Return this node's parent */
  public Node getParent() { return parent; }
-  public void setParent( Node parent ) {this.parent = parent;}
+  public void setParent(Node parent) {this.parent = parent;}
   
  /** Return this node's level in the tree.
   * E.g. the root of a tree returns 0 and its children return 1
   */
  public int getLevel() { return level; }
-  public void setLevel( int level) {this.level = level;}
+  public void setLevel(int level) {this.level = level;}

  /////////////////////////////////////////////////
  // Writable
@@ -282,12 +282,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
  /**
   */
  public void write(DataOutput out) throws IOException {
-    super.write( out );
+    super.write(out);
    out.writeLong(capacity);
    out.writeLong(remaining);
    out.writeLong(lastUpdate);
    out.writeInt(xceiverCount);
-    Text.writeString( out, location );
+    Text.writeString(out, location);
    WritableUtils.writeEnum(out, getAdminState());
  }

@@ -299,7 +299,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
    this.remaining = in.readLong();
    this.lastUpdate = in.readLong();
    this.xceiverCount = in.readInt();
-    this.location = Text.readString( in );
+    this.location = Text.readString(in);
    AdminStates newState = (AdminStates) WritableUtils.readEnum(in,
                                                                AdminStates.class);
    setAdminState(newState);
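
write() and readFields() above must agree on field order (the DatanodeID fields first, then capacity, remaining, lastUpdate, xceiverCount, location, adminState). A hedged round-trip sketch; it assumes the no-arg constructor Writables conventionally provide and package-level access to the class, with plain java.io plumbing:

import java.io.*;

class RoundTripSketch {
  static byte[] serialize(DatanodeInfo info) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    info.write(new DataOutputStream(buf));   // field order as in the hunk
    return buf.toByteArray();
  }

  static DatanodeInfo deserialize(byte[] bytes) throws IOException {
    DatanodeInfo copy = new DatanodeInfo();  // assumed no-arg constructor
    copy.readFields(new DataInputStream(new ByteArrayInputStream(bytes)));
    return copy;
  }
}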

+ 9 - 9
src/java/org/apache/hadoop/dfs/DatanodeProtocol.java

@@ -64,9 +64,9 @@ interface DatanodeProtocol extends VersionedProtocol {
   * new storageID if the datanode did not have one and
   * registration ID for further communication.
   */
-  public DatanodeRegistration register( DatanodeRegistration registration,
-                                        String networkLocation
-                                        ) throws IOException;
+  public DatanodeRegistration register(DatanodeRegistration registration,
+                                       String networkLocation
+                                       ) throws IOException;
  /**
   * sendHeartbeat() tells the NameNode that the DataNode is still
   * alive and well.  Includes some status info, too. 
@@ -74,10 +74,10 @@ interface DatanodeProtocol extends VersionedProtocol {
   * A DatanodeCommand tells the DataNode to invalidate local block(s), 
   * or to copy them to other DataNodes, etc.
   */
-  public DatanodeCommand sendHeartbeat( DatanodeRegistration registration,
-                                        long capacity, long remaining,
-                                        int xmitsInProgress,
-                                        int xceiverCount) throws IOException;
+  public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
+                                       long capacity, long remaining,
+                                       int xmitsInProgress,
+                                       int xceiverCount) throws IOException;

  /**
   * blockReport() tells the NameNode about all the locally-stored blocks.
@@ -86,8 +86,8 @@ interface DatanodeProtocol extends VersionedProtocol {
   * the locally-stored blocks.  It's invoked upon startup and then
   * infrequently afterwards.
   */
-  public DatanodeCommand blockReport( DatanodeRegistration registration,
-                                      Block blocks[]) throws IOException;
+  public DatanodeCommand blockReport(DatanodeRegistration registration,
+                                     Block blocks[]) throws IOException;
    
  /**
   * blockReceived() allows the DataNode to tell the NameNode about
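
Taken together, the methods above define the datanode's side of the protocol: register once, then heartbeat periodically and obey whatever DatanodeCommand comes back. A hedged sketch of that driver loop; only the interface calls come from this diff, while the class, its fields, the rack string, and processCommand() are invented for illustration.

import java.io.IOException;

class HeartbeatLoopSketch {
  private volatile boolean shouldRun = true;
  private long capacity, remaining;          // illustrative node stats
  private int xmitsInProgress, xceiverCount;
  private long heartbeatInterval = 3000L;    // ms; made-up default

  void offerService(DatanodeProtocol namenode, DatanodeRegistration reg)
      throws IOException, InterruptedException {
    // one-time handshake; the namenode may hand back a new storageID
    reg = namenode.register(reg, "/default-rack");  // rack name invented
    while (shouldRun) {
      DatanodeCommand cmd = namenode.sendHeartbeat(reg, capacity, remaining,
                                                   xmitsInProgress,
                                                   xceiverCount);
      if (cmd != null)
        processCommand(cmd);  // e.g. invalidate or replicate blocks
      Thread.sleep(heartbeatInterval);
    }
  }

  private void processCommand(DatanodeCommand cmd) {
    // placeholder: a real datanode dispatches on the command's action
  }
}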

+ 9 - 9
src/java/org/apache/hadoop/dfs/DatanodeRegistration.java

@@ -31,7 +31,7 @@ class DatanodeRegistration extends DatanodeID implements Writable {
   * Default constructor.
   */
  public DatanodeRegistration() {
-    super( null, null, -1 );
+    super(null, null, -1);
    this.storageInfo = new StorageInfo();
  }
  
@@ -40,9 +40,9 @@ class DatanodeRegistration extends DatanodeID implements Writable {
   */
  public DatanodeRegistration(String nodeName, 
                              int infoPort,
-                              DataStorage storage ) {
-    super( nodeName, storage.getStorageID(), infoPort );
-    this.storageInfo = new StorageInfo( storage );
+                              DataStorage storage) {
+    super(nodeName, storage.getStorageID(), infoPort);
+    this.storageInfo = new StorageInfo(storage);
  }

  /**
@@ -54,7 +54,7 @@ class DatanodeRegistration extends DatanodeID implements Writable {
  /**
   */
  public String getRegistrationID() {
-    return Storage.getRegistrationID( storageInfo );
+    return Storage.getRegistrationID(storageInfo);
  }

  /////////////////////////////////////////////////
@@ -63,10 +63,10 @@ class DatanodeRegistration extends DatanodeID implements Writable {
  /**
   */
  public void write(DataOutput out) throws IOException {
-    super.write( out );
-    out.writeInt( storageInfo.getLayoutVersion() );
-    out.writeInt( storageInfo.getNamespaceID() );
-    out.writeLong( storageInfo.getCTime() );
+    super.write(out);
+    out.writeInt(storageInfo.getLayoutVersion());
+    out.writeInt(storageInfo.getNamespaceID());
+    out.writeLong(storageInfo.getCTime());
  }

  /**

+ 2 - 2
src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java

@@ -12,7 +12,7 @@ import java.io.IOException;
 */
class DisallowedDatanodeException extends IOException {

-  public DisallowedDatanodeException( DatanodeID nodeID ) {
-    super("Datanode denied communication with namenode: " + nodeID.getName() );
+  public DisallowedDatanodeException(DatanodeID nodeID) {
+    super("Datanode denied communication with namenode: " + nodeID.getName());
  }
}

+ 16 - 16
src/java/org/apache/hadoop/dfs/DistributedFileSystem.java

@@ -64,7 +64,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
      setConf(conf);
      String host = uri.getHost();
      int port = uri.getPort();
-      this.dfs = new DFSClient(new InetSocketAddress(host,port), conf);
+      this.dfs = new DFSClient(new InetSocketAddress(host, port), conf);
      this.uri = URI.create("hdfs://"+host+":"+port);
      this.localFs = getNamed("file:///", conf);
    }
@@ -122,7 +122,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
    }

    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-      if (! exists(f)) {
+      if (!exists(f)) {
        throw new FileNotFoundException(f.toString());
      }

@@ -132,7 +132,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
    public FSDataOutputStream create(Path f, boolean overwrite,
                                     int bufferSize, short replication, long blockSize,
                                     Progressable progress) throws IOException {
-      if (exists(f) && ! overwrite) {
+      if (exists(f) && !overwrite) {
        throw new IOException("File already exists:"+f);
      }
      Path parent = f.getParent();
@@ -146,9 +146,9 @@ public class DistributedFileSystem extends ChecksumFileSystem {
                                    bufferSize);
    }
    
-    public boolean setReplication( Path src, 
-                                   short replication
-                                   ) throws IOException {
+    public boolean setReplication(Path src, 
+                                  short replication
+                                  ) throws IOException {
      return dfs.setReplication(getPath(src), replication);
    }
    
@@ -223,7 +223,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {

    /** @deprecated */ @Deprecated
      public void lock(Path f, boolean shared) throws IOException {
-      dfs.lock(getPath(f), ! shared);
+      dfs.lock(getPath(f), !shared);
    }

    /** @deprecated */ @Deprecated
@@ -232,13 +232,13 @@ public class DistributedFileSystem extends ChecksumFileSystem {
    }

    @Override
-      public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
      throws IOException {
      FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
    }

    @Override
-      public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
      throws IOException {
      FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
    }
@@ -290,9 +290,9 @@ public class DistributedFileSystem extends ChecksumFileSystem {
     *  
     * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
     */
-    public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+    public boolean setSafeMode(FSConstants.SafeModeAction action) 
      throws IOException {
-      return dfs.setSafeMode( action );
+      return dfs.setSafeMode(action);
    }

    /*
@@ -368,17 +368,17 @@ public class DistributedFileSystem extends ChecksumFileSystem {
  }

  public DistributedFileSystem() {
-    super( new RawDistributedFileSystem() );
+    super(new RawDistributedFileSystem());
  }

  /** @deprecated */
  public DistributedFileSystem(InetSocketAddress namenode,
                               Configuration conf) throws IOException {
-    super( new RawDistributedFileSystem(namenode, conf) );
+    super(new RawDistributedFileSystem(namenode, conf));
  }

  @Override
-    public long getContentLength(Path f) throws IOException {
+  public long getContentLength(Path f) throws IOException {
    return fs.getContentLength(f);
  }

@@ -404,9 +404,9 @@ public class DistributedFileSystem extends ChecksumFileSystem {
   *  
   * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
   */
-  public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+  public boolean setSafeMode(FSConstants.SafeModeAction action) 
    throws IOException {
-    return ((RawDistributedFileSystem)fs).setSafeMode( action );
+    return ((RawDistributedFileSystem)fs).setSafeMode(action);
  }

  /*
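
Both the raw and wrapping classes expose setSafeMode(), which forwards to the ClientProtocol call referenced in the @see tags above. A hedged usage sketch, assuming an already-configured dfs instance and the SAFEMODE_GET action that FSConstants.SafeModeAction conventionally carries:

import java.io.IOException;

class SafeModeSketch {
  static void report(DistributedFileSystem dfs) throws IOException {
    // SAFEMODE_GET queries the state without changing it (assumed constant)
    boolean on = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
    System.out.println("Safe mode is " + (on ? "ON" : "OFF"));
  }
}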

+ 45 - 45
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -50,8 +50,8 @@ class FSDataset implements FSConstants {
      throws IOException {
      this.dir = dir;
      this.children = null;
-      if (! dir.exists()) {
-        if (! dir.mkdirs()) {
+      if (!dir.exists()) {
+        if (!dir.mkdirs()) {
          throw new IOException("Mkdirs failed to create " + 
                                dir.toString());
        }
@@ -78,14 +78,14 @@ class FSDataset implements FSConstants {
      }
    }
        
-    public File addBlock( Block b, File src ) throws IOException {
+    public File addBlock(Block b, File src) throws IOException {
      //First try without creating subdirectories
-      File file = addBlock( b, src, false, false );          
-      return ( file != null ) ? file : addBlock( b, src, true, true );
+      File file = addBlock(b, src, false, false);          
+      return (file != null) ? file : addBlock(b, src, true, true);
    }

-    private File addBlock( Block b, File src, boolean createOk, 
-                           boolean resetIdx ) throws IOException {
+    private File addBlock(Block b, File src, boolean createOk, 
+                          boolean resetIdx) throws IOException {
       if (numBlocks < maxBlocksPerDir) {
       if (numBlocks < maxBlocksPerDir) {
         File dest = new File(dir, b.getBlockName());
         File dest = new File(dir, b.getBlockName());
         src.renameTo(dest);
         src.renameTo(dest);
@@ -93,17 +93,17 @@ class FSDataset implements FSConstants {
        return dest;
      }
            
-      if ( lastChildIdx < 0 && resetIdx ) {
+      if (lastChildIdx < 0 && resetIdx) {
        //reset so that all children will be checked
-        lastChildIdx = random.nextInt( children.length );              
+        lastChildIdx = random.nextInt(children.length);              
      }
            
-      if ( lastChildIdx >= 0 && children != null ) {
+      if (lastChildIdx >= 0 && children != null) {
        //Check if any child-tree has room for a block.
        for (int i=0; i < children.length; i++) {
-          int idx = ( lastChildIdx + i )%children.length;
-          File file = children[idx].addBlock( b, src, false, resetIdx );
-          if ( file != null ) {
+          int idx = (lastChildIdx + i)%children.length;
+          File file = children[idx].addBlock(b, src, false, resetIdx);
+          if (file != null) {
            lastChildIdx = idx;
            return file; 
          }
@@ -111,20 +111,20 @@ class FSDataset implements FSConstants {
        lastChildIdx = -1;
      }
            
-      if ( !createOk ) {
+      if (!createOk) {
        return null;
      }
            
-      if ( children == null || children.length == 0 ) {
+      if (children == null || children.length == 0) {
        children = new FSDir[maxBlocksPerDir];
        for (int idx = 0; idx < maxBlocksPerDir; idx++) {
-          children[idx] = new FSDir( new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx) );
+          children[idx] = new FSDir(new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx));
        }
      }
            
      //now pick a child randomly for creating a new set of subdirs.
-      lastChildIdx = random.nextInt( children.length );
-      return children[ lastChildIdx ].addBlock( b, src, true, false ); 
+      lastChildIdx = random.nextInt(children.length);
+      return children[ lastChildIdx ].addBlock(b, src, true, false); 
    }

    /**
@@ -194,13 +194,13 @@ class FSDataset implements FSConstants {
    void clearPath(File f) {
      String root = dir.getAbsolutePath();
      String dir = f.getAbsolutePath();
-      if ( dir.startsWith( root ) ) {
-        String[] dirNames = dir.substring( root.length() ).
-          split( File.separator + "subdir" );
-        if ( clearPath( f, dirNames, 1 ) )
+      if (dir.startsWith(root)) {
+        String[] dirNames = dir.substring(root.length()).
+          split(File.separator + "subdir");
+        if (clearPath(f, dirNames, 1))
          return;
      }
-      clearPath( f, null, -1 );
+      clearPath(f, null, -1);
    }
        
    /*
@@ -211,33 +211,33 @@ class FSDataset implements FSConstants {
     * children in common case. If directory structure changes 
     * in later versions, we need to revisit this.
     */
-    private boolean clearPath( File f, String[] dirNames, int idx ) {
-      if ( ( dirNames == null || idx == dirNames.length ) &&
-           dir.compareTo(f) == 0) {
+    private boolean clearPath(File f, String[] dirNames, int idx) {
+      if ((dirNames == null || idx == dirNames.length) &&
+          dir.compareTo(f) == 0) {
        numBlocks--;
        return true;
      }
          
-      if ( dirNames != null ) {
+      if (dirNames != null) {
        //guess the child index from the directory name
-        if ( idx > ( dirNames.length - 1 ) || children == null ) {
+        if (idx > (dirNames.length - 1) || children == null) {
          return false;
        }
        int childIdx; 
        try {
-          childIdx = Integer.parseInt( dirNames[idx] );
-        } catch ( NumberFormatException ignored ) {
+          childIdx = Integer.parseInt(dirNames[idx]);
+        } catch (NumberFormatException ignored) {
          // layout changed? we could print a warning.
          return false;
        }
-        return ( childIdx >= 0 && childIdx < children.length ) ?
-          children[childIdx].clearPath( f, dirNames, idx+1 ) : false;
+        return (childIdx >= 0 && childIdx < children.length) ?
+          children[childIdx].clearPath(f, dirNames, idx+1) : false;
      }

      //guesses failed. back to blind iteration.
-      if ( children != null ) {
+      if (children != null) {
        for(int i=0; i < children.length; i++) {
-          if ( children[i].clearPath( f, null, -1 ) ){
+          if (children[i].clearPath(f, null, -1)){
            return true;
          }
        }
@@ -262,12 +262,12 @@ class FSDataset implements FSConstants {
    private long reserved;
    private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
    
-    FSVolume( File currentDir, Configuration conf) throws IOException {
+    FSVolume(File currentDir, Configuration conf) throws IOException {
      this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
      this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
                                         (float) USABLE_DISK_PCT_DEFAULT);
      File parent = currentDir.getParentFile();
-      this.dataDir = new FSDir( currentDir );
+      this.dataDir = new FSDir(currentDir);
      this.tmpDir = new File(parent, "tmp");
      if (tmpDir.exists()) {
        FileUtil.fullyDelete(tmpDir);
@@ -288,7 +288,7 @@ class FSDataset implements FSConstants {
      long capacity = usage.getCapacity();
      long freespace = Math.round(usage.getAvailableSkipRefresh() -
                                  capacity * (1 - usableDiskPct) - reserved); 
-      return ( freespace > 0 ) ? freespace : 0;
+      return (freespace > 0) ? freespace : 0;
    }
      
    String getMount() throws IOException {
@@ -309,7 +309,7 @@ class FSDataset implements FSConstants {
                                b + ".  File " + f + " should be creatable, but is already present.");
        }
      } catch (IOException ie) {
-        System.out.println("Exception!  " + ie);
+        System.out.println("Exception! " + ie);
        throw ie;
      }
      return f;
@@ -430,7 +430,7 @@ class FSDataset implements FSConstants {
  /**
   * An FSDataset has a directory where it loads its data files.
   */
-  public FSDataset( DataStorage storage, Configuration conf) throws IOException {
+  public FSDataset(DataStorage storage, Configuration conf) throws IOException {
    this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64);
    FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
    for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
@@ -461,7 +461,7 @@ class FSDataset implements FSConstants {
   * Find the block's on-disk length
   */
  public long getLength(Block b) throws IOException {
-    if (! isValidBlock(b)) {
+    if (!isValidBlock(b)) {
      throw new IOException("Block " + b + " is not valid.");
    }
    File f = getFile(b);
@@ -472,7 +472,7 @@ class FSDataset implements FSConstants {
   * Get a stream of data from the indicated block.
   */
  public synchronized InputStream getBlockData(Block b) throws IOException {
-    if (! isValidBlock(b)) {
+    if (!isValidBlock(b)) {
      throw new IOException("Block " + b + " is not valid.");
    }
    // File should be opened with the lock.
@@ -495,7 +495,7 @@ class FSDataset implements FSConstants {
    // Serialize access to /tmp, and check if file already there.
    //
    File f = null;
-    synchronized ( this ) {
+    synchronized (this) {
      //
      // Is it already in the create process?
      //
@@ -514,7 +514,7 @@ class FSDataset implements FSConstants {
        }
      }
      FSVolume v = null;
-      synchronized ( volumes ) {
+      synchronized (volumes) {
        v = volumes.getNextVolume(blockSize);
        // create temporary file to hold block in the designated volume
        f = v.createTmpFile(b);
@@ -544,7 +544,7 @@ class FSDataset implements FSConstants {
   */
  public synchronized void finalizeBlock(Block b) throws IOException {
    File f = ongoingCreates.get(b);
-    if (f == null || ! f.exists()) {
+    if (f == null || !f.exists()) {
      throw new IOException("No temporary file " + f + " for block " + b);
    }
    long finalLen = f.length();
@@ -552,7 +552,7 @@ class FSDataset implements FSConstants {
    FSVolume v = volumeMap.get(b);
        
    File dest = null;
-    synchronized ( volumes ) {
+    synchronized (volumes) {
      dest = v.addBlock(b, f);
    }
    blockMap.put(b, dest);
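
The addBlock() hunks above spread blocks across "subdir" children once a directory holds maxBlocksPerDir blocks, starting from a random child so siblings fill evenly. A simplified, hedged sketch of that placement rule; the names mirror the diff, but this standalone version skips the retry and bookkeeping logic of the real FSDir:

import java.io.File;
import java.util.Random;

class FanOutSketch {
  static final int MAX_BLOCKS_PER_DIR = 64;   // dfs.datanode.numblocks default
  static final Random random = new Random();

  // Pick a directory with room for one more block, descending into a
  // randomly chosen "subdir<i>" child once this level is full.
  static File pickDir(File dir, int blocksHere) {
    if (blocksHere < MAX_BLOCKS_PER_DIR)
      return dir;                              // room at this level
    int idx = random.nextInt(MAX_BLOCKS_PER_DIR);
    File child = new File(dir, "subdir" + idx);
    child.mkdirs();                            // create the child lazily
    return child;
  }
}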

+ 66 - 66
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -114,12 +114,12 @@ class FSDirectory implements FSConstants {
     * @return Iterator of children
     */
    Iterator<INode> getChildIterator() {
-      return ( children != null ) ?  children.values().iterator() : null;
+      return (children != null) ?  children.values().iterator() : null;
      // instead of null, we could return a static empty iterator.
    }
        
    void addChild(String name, INode node) {
-      if ( children == null ) {
+      if (children == null) {
         children = new TreeMap<String, INode>();
         children = new TreeMap<String, INode>();
       }
       }
       children.put(name, node);
       children.put(name, node);
@@ -129,8 +129,8 @@ class FSDirectory implements FSConstants {
      * This is the external interface
      * This is the external interface
      */
      */
     INode getNode(String target) {
     INode getNode(String target) {
-      if ( target == null || 
-           ! target.startsWith("/") || target.length() == 0) {
+      if (target == null || 
+          !target.startsWith("/") || target.length() == 0) {
         return null;
         return null;
       } else if (parent == null && "/".equals(target)) {
       } else if (parent == null && "/".equals(target)) {
         return this;
         return this;
@@ -152,7 +152,7 @@ class FSDirectory implements FSConstants {
     /**
     /**
      */
      */
     INode getNode(Vector<String> components, int index) {
     INode getNode(Vector<String> components, int index) {
-      if (! name.equals(components.elementAt(index))) {
+      if (!name.equals(components.elementAt(index))) {
         return null;
         return null;
       }
       }
       if (index == components.size()-1) {
       if (index == components.size()-1) {
@@ -168,8 +168,8 @@ class FSDirectory implements FSConstants {
       }
       }
     }
     }
         
         
-    INode getChild( String name) {
-      return (children == null) ? null : children.get( name );
+    INode getChild(String name) {
+      return (children == null) ? null : children.get(name);
     }
     }
 
 
     /**
     /**
@@ -183,7 +183,7 @@ class FSDirectory implements FSConstants {
      * @author shv
      * @author shv
      */
      */
     INode addNode(String path, INode newNode) throws FileNotFoundException {
     INode addNode(String path, INode newNode) throws FileNotFoundException {
-      File target = new File( path );
+      File target = new File(path);
       // find parent
       // find parent
       Path parent = new Path(path).getParent();
       Path parent = new Path(path).getParent();
       if (parent == null) { // add root
       if (parent == null) { // add root
@@ -200,7 +200,7 @@ class FSDirectory implements FSConstants {
       }
       }
       // check whether the parent already has a node with that name
       // check whether the parent already has a node with that name
       String name = newNode.name = target.getName();
       String name = newNode.name = target.getName();
-      if( parentNode.getChild( name ) != null ) {
+      if (parentNode.getChild(name) != null) {
         return null;
         return null;
       }
       }
       // insert into the parent children list
       // insert into the parent children list
@@ -233,7 +233,7 @@ class FSDirectory implements FSConstants {
       }
       }
       incrDeletedFileCount();
       incrDeletedFileCount();
       for (Iterator<INode> it = getChildIterator(); it != null &&
       for (Iterator<INode> it = getChildIterator(); it != null &&
-             it.hasNext(); ) {
+             it.hasNext();) {
         it.next().collectSubtreeBlocks(v);
         it.next().collectSubtreeBlocks(v);
       }
       }
     }
     }
@@ -243,7 +243,7 @@ class FSDirectory implements FSConstants {
     int numItemsInTree() {
     int numItemsInTree() {
       int total = 0;
       int total = 0;
       for (Iterator<INode> it = getChildIterator(); it != null && 
       for (Iterator<INode> it = getChildIterator(); it != null && 
-             it.hasNext(); ) {
+             it.hasNext();) {
         total += it.next().numItemsInTree();
         total += it.next().numItemsInTree();
       }
       }
       return total + 1;
       return total + 1;
@@ -276,7 +276,7 @@ class FSDirectory implements FSConstants {
     long computeContentsLength() {
     long computeContentsLength() {
       long total = computeFileLength();
       long total = computeFileLength();
       for (Iterator<INode> it = getChildIterator(); it != null && 
       for (Iterator<INode> it = getChildIterator(); it != null && 
-             it.hasNext(); ) {
+             it.hasNext();) {
         total += it.next().computeContentsLength();
         total += it.next().computeContentsLength();
       }
       }
       return total;
       return total;
@@ -302,7 +302,7 @@ class FSDirectory implements FSConstants {
       }
       }
 
 
       for (Iterator<INode> it = getChildIterator(); it != null && 
       for (Iterator<INode> it = getChildIterator(); it != null && 
-             it.hasNext(); ) {
+             it.hasNext();) {
         v.add(it.next());
         v.add(it.next());
       }
       }
     }
     }
@@ -335,17 +335,17 @@ class FSDirectory implements FSConstants {
     directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
     directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
   }
   }
 
 
-  void loadFSImage( Collection<File> dataDirs,
-                    StartupOption startOpt ) throws IOException {
+  void loadFSImage(Collection<File> dataDirs,
+                   StartupOption startOpt) throws IOException {
     // format before starting up if requested
     // format before starting up if requested
-    if( startOpt == StartupOption.FORMAT ) {
-      fsImage.setStorageDirectories( dataDirs );
+    if (startOpt == StartupOption.FORMAT) {
+      fsImage.setStorageDirectories(dataDirs);
       fsImage.format();
       fsImage.format();
       startOpt = StartupOption.REGULAR;
       startOpt = StartupOption.REGULAR;
     }
     }
     try {
     try {
-      fsImage.recoverTransitionRead( dataDirs, startOpt );
-    } catch( IOException e ) {
+      fsImage.recoverTransitionRead(dataDirs, startOpt);
+    } catch(IOException e) {
       fsImage.close();
       fsImage.close();
       throw e;
       throw e;
     }
     }
@@ -371,7 +371,7 @@ class FSDirectory implements FSConstants {
    * Block until the object is ready to be used.
    * Block until the object is ready to be used.
    */
    */
   void waitForReady() {
   void waitForReady() {
-    if (! ready) {
+    if (!ready) {
       synchronized (this) {
       synchronized (this) {
         while (!ready) {
         while (!ready) {
           try {
           try {
@@ -391,20 +391,20 @@ class FSDirectory implements FSConstants {
 
 
     // Always do an implicit mkdirs for parent directory tree
     // Always do an implicit mkdirs for parent directory tree
     String pathString = path.toString();
     String pathString = path.toString();
-    if( ! mkdirs(new Path(pathString).getParent().toString()) ) {
+    if (!mkdirs(new Path(pathString).getParent().toString())) {
       return false;
       return false;
     }
     }
-    INode newNode = new INode( new File(pathString).getName(), blocks, replication);
-    if( ! unprotectedAddFile(path, newNode) ) {
+    INode newNode = new INode(new File(pathString).getName(), blocks, replication);
+    if (!unprotectedAddFile(path, newNode)) {
       NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
       NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
                                    +"failed to add "+path+" with "
                                    +"failed to add "+path+" with "
-                                   +blocks.length+" blocks to the file system" );
+                                   +blocks.length+" blocks to the file system");
       return false;
       return false;
     }
     }
     // add create file record to log
     // add create file record to log
-    fsImage.getEditLog().logCreateFile( newNode );
+    fsImage.getEditLog().logCreateFile(newNode);
     NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
     NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                  +path+" with "+blocks.length+" blocks is added to the file system" );
+                                  +path+" with "+blocks.length+" blocks is added to the file system");
     return true;
     return true;
   }
   }
     
     
@@ -413,7 +413,7 @@ class FSDirectory implements FSConstants {
   boolean unprotectedAddFile(UTF8 path, INode newNode) {
   boolean unprotectedAddFile(UTF8 path, INode newNode) {
     synchronized (rootDir) {
     synchronized (rootDir) {
       try {
       try {
-        if( rootDir.addNode(path.toString(), newNode ) != null ) {
+        if (rootDir.addNode(path.toString(), newNode) != null) {
           int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
           int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
           // Add file->block mapping
           // Add file->block mapping
           for (int i = 0; i < nrBlocks; i++)
           for (int i = 0; i < nrBlocks; i++)
@@ -422,15 +422,15 @@ class FSDirectory implements FSConstants {
         } else {
         } else {
           return false;
           return false;
         }
         }
-      } catch (FileNotFoundException e ) {
+      } catch (FileNotFoundException e) {
         return false;
         return false;
       }
       }
     }
     }
   }
   }
     
     
-  boolean unprotectedAddFile(UTF8 path, Block[] blocks, short replication ) {
-    return unprotectedAddFile( path,  
-                               new INode( path.toString(), blocks, replication ));
+  boolean unprotectedAddFile(UTF8 path, Block[] blocks, short replication) {
+    return unprotectedAddFile(path,  
+                              new INode(path.toString(), blocks, replication));
   }
   }
 
 
   /**
   /**
@@ -438,9 +438,9 @@ class FSDirectory implements FSConstants {
    */
    */
   public boolean renameTo(UTF8 src, UTF8 dst) {
   public boolean renameTo(UTF8 src, UTF8 dst) {
     NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
     NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
-                                  +src+" to "+dst );
+                                  +src+" to "+dst);
     waitForReady();
     waitForReady();
-    if( ! unprotectedRenameTo(src, dst) )
+    if (!unprotectedRenameTo(src, dst))
       return false;
       return false;
     fsImage.getEditLog().logRename(src, dst);
     fsImage.getEditLog().logRename(src, dst);
     return true;
     return true;
@@ -455,29 +455,29 @@ class FSDirectory implements FSConstants {
       INode renamedNode = rootDir.getNode(srcStr);
       INode renamedNode = rootDir.getNode(srcStr);
       if (renamedNode == null) {
       if (renamedNode == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                     +"failed to rename "+src+" to "+dst+ " because source does not exist" );
+                                     +"failed to rename "+src+" to "+dst+ " because source does not exist");
         return false;
         return false;
       }
       }
       if (isDir(dst)) {
       if (isDir(dst)) {
         dstStr += "/" + new File(srcStr).getName();
         dstStr += "/" + new File(srcStr).getName();
       }
       }
-      if( rootDir.getNode(dstStr.toString()) != null ) {
+      if (rootDir.getNode(dstStr.toString()) != null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                     +"failed to rename "+src+" to "+dstStr+ " because destination exists" );
+                                     +"failed to rename "+src+" to "+dstStr+ " because destination exists");
         return false;
         return false;
       }
       }
       renamedNode.removeNode();
       renamedNode.removeNode();
             
             
       // the renamed node can be reused now
       // the renamed node can be reused now
       try {
       try {
-        if( rootDir.addNode(dstStr, renamedNode ) != null ) {
+        if (rootDir.addNode(dstStr, renamedNode) != null) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
-                                        +src+" is renamed to "+dst );
+                                        +src+" is renamed to "+dst);
           return true;
           return true;
         }
         }
-      } catch (FileNotFoundException e ) {
+      } catch (FileNotFoundException e) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                     +"failed to rename "+src+" to "+dst );
+                                     +"failed to rename "+src+" to "+dst);
         try {
         try {
           rootDir.addNode(srcStr, renamedNode); // put it back
           rootDir.addNode(srcStr, renamedNode); // put it back
         }catch(FileNotFoundException e2) {                
         }catch(FileNotFoundException e2) {                
@@ -497,33 +497,33 @@ class FSDirectory implements FSConstants {
    * @return array of file blocks
    * @return array of file blocks
    * @throws IOException
    * @throws IOException
    */
    */
-  Block[] setReplication( String src, 
-                          short replication,
-                          Vector<Integer> oldReplication
-                          ) throws IOException {
+  Block[] setReplication(String src, 
+                         short replication,
+                         Vector<Integer> oldReplication
+                         ) throws IOException {
     waitForReady();
     waitForReady();
-    Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication );
-    if( fileBlocks != null )  // log replication change
-      fsImage.getEditLog().logSetReplication( src, replication );
+    Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication);
+    if (fileBlocks != null)  // log replication change
+      fsImage.getEditLog().logSetReplication(src, replication);
     return fileBlocks;
     return fileBlocks;
   }
   }
 
 
-  Block[] unprotectedSetReplication(  String src, 
-                                      short replication,
-                                      Vector<Integer> oldReplication
-                                      ) throws IOException {
-    if( oldReplication == null )
+  Block[] unprotectedSetReplication( String src, 
+                                     short replication,
+                                     Vector<Integer> oldReplication
+                                     ) throws IOException {
+    if (oldReplication == null)
       oldReplication = new Vector<Integer>();
       oldReplication = new Vector<Integer>();
     oldReplication.setSize(1);
     oldReplication.setSize(1);
-    oldReplication.set( 0, new Integer(-1) );
+    oldReplication.set(0, new Integer(-1));
     Block[] fileBlocks = null;
     Block[] fileBlocks = null;
     synchronized(rootDir) {
     synchronized(rootDir) {
       INode fileNode = rootDir.getNode(src);
       INode fileNode = rootDir.getNode(src);
       if (fileNode == null)
       if (fileNode == null)
         return null;
         return null;
-      if( fileNode.isDir() )
+      if (fileNode.isDir())
         return null;
         return null;
-      oldReplication.set( 0, new Integer( fileNode.blockReplication ));
+      oldReplication.set(0, new Integer(fileNode.blockReplication));
       fileNode.blockReplication = replication;
       fileNode.blockReplication = replication;
       fileBlocks = fileNode.blocks;
       fileBlocks = fileNode.blocks;
     }
     }
@@ -555,11 +555,11 @@ class FSDirectory implements FSConstants {
    */
    */
   public Block[] delete(UTF8 src) {
   public Block[] delete(UTF8 src) {
     NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "
     NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "
-                                  +src );
+                                  +src);
     waitForReady();
     waitForReady();
     Block[] blocks = unprotectedDelete(src); 
     Block[] blocks = unprotectedDelete(src); 
-    if( blocks != null )
-      fsImage.getEditLog().logDelete( src );
+    if (blocks != null)
+      fsImage.getEditLog().logDelete(src);
     return blocks;
     return blocks;
   }
   }
 
 
@@ -570,20 +570,20 @@ class FSDirectory implements FSConstants {
       INode targetNode = rootDir.getNode(src.toString());
       INode targetNode = rootDir.getNode(src.toString());
       if (targetNode == null) {
       if (targetNode == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
-                                     +"failed to remove "+src+" because it does not exist" );
+                                     +"failed to remove "+src+" because it does not exist");
         return null;
         return null;
       } else {
       } else {
         //
         //
         // Remove the node from the namespace and GC all
         // Remove the node from the namespace and GC all
         // the blocks underneath the node.
         // the blocks underneath the node.
         //
         //
-        if (! targetNode.removeNode()) {
+        if (!targetNode.removeNode()) {
           NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
           NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
-                                       +"failed to remove "+src+" because it does not have a parent" );
+                                       +"failed to remove "+src+" because it does not have a parent");
           return null;
           return null;
         } else {
         } else {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-                                        +src+" is removed" );
+                                        +src+" is removed");
           Vector<Block> v = new Vector<Block>();
           Vector<Block> v = new Vector<Block>();
           targetNode.collectSubtreeBlocks(v);
           targetNode.collectSubtreeBlocks(v);
           for (Block b : v) {
           for (Block b : v) {
@@ -675,7 +675,7 @@ class FSDirectory implements FSConstants {
     String srcs = normalizePath(src);
     String srcs = normalizePath(src);
     synchronized (rootDir) {
     synchronized (rootDir) {
       if (srcs.startsWith("/") && 
       if (srcs.startsWith("/") && 
-          ! srcs.endsWith("/") && 
+          !srcs.endsWith("/") && 
           rootDir.getNode(srcs) == null) {
           rootDir.getNode(srcs) == null) {
         return true;
         return true;
       } else {
       } else {
@@ -722,8 +722,8 @@ class FSDirectory implements FSConstants {
         INode inserted = unprotectedMkdir(cur);
         INode inserted = unprotectedMkdir(cur);
         if (inserted != null) {
         if (inserted != null) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
           NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
-                                        +"created directory "+cur );
-          fsImage.getEditLog().logMkDir( inserted );
+                                        +"created directory "+cur);
+          fsImage.getEditLog().logMkDir(inserted);
         } else { // otherwise cur exists, verify that it is a directory
         } else { // otherwise cur exists, verify that it is a directory
           if (!isDir(new UTF8(cur))) {
           if (!isDir(new UTF8(cur))) {
             NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
             NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
@@ -731,7 +731,7 @@ class FSDirectory implements FSConstants {
             return false;
             return false;
           } 
           } 
         }
         }
-      } catch (FileNotFoundException e ) {
+      } catch (FileNotFoundException e) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
         NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
                                       +"failed to create directory "+src);
                                       +"failed to create directory "+src);
         return false;
         return false;

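For context on the FSDirectory hunks above: the namespace is an INode tree whose children live in a TreeMap, and getNode resolves a path one component at a time. A rough standalone sketch of that lookup, with names approximated from the hunks rather than copied from the class:

import java.util.TreeMap;

// Simplified sketch of an INode-style tree lookup; field and method
// names are approximations of what the hunks above show.
class TreeLookupDemo {
  static class Node {
    final String name;
    TreeMap<String, Node> children;

    Node(String name) { this.name = name; }

    Node addChild(String childName) {
      if (children == null) {
        children = new TreeMap<String, Node>();
      }
      Node child = new Node(childName);
      children.put(childName, child);
      return child;
    }

    Node getChild(String childName) {
      return (children == null) ? null : children.get(childName);
    }
  }

  // Walk one component at a time from the root, as getNode does.
  static Node getNode(Node root, String target) {
    if (target == null || !target.startsWith("/") || target.length() == 0) {
      return null;
    }
    Node cur = root;
    for (String component : target.substring(1).split("/")) {
      if (cur == null || component.length() == 0) {
        break;
      }
      cur = cur.getChild(component);
    }
    return cur;
  }

  public static void main(String[] args) {
    Node root = new Node("");
    root.addChild("user").addChild("data");
    System.out.println(getNode(root, "/user/data") != null);    // true
    System.out.println(getNode(root, "/user/missing") != null); // false
  }
}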
+ 77 - 77
src/java/org/apache/hadoop/dfs/FSEditLog.java

@@ -52,8 +52,8 @@ class FSEditLog {
  static class EditLogOutputStream extends DataOutputStream {
    private FileDescriptor fd;

-    EditLogOutputStream( File name ) throws IOException {
-      super( new FileOutputStream( name, true )); // open for append
+    EditLogOutputStream(File name) throws IOException {
+      super(new FileOutputStream(name, true)); // open for append
      this.fd = ((FileOutputStream)out).getFD();
    }

@@ -63,21 +63,21 @@ class FSEditLog {
    }

    void create() throws IOException {
-      writeInt( FSConstants.LAYOUT_VERSION );
+      writeInt(FSConstants.LAYOUT_VERSION);
      flushAndSync();
    }
  }

-  FSEditLog( FSImage image ) {
+  FSEditLog(FSImage image) {
    fsimage = image;
  }

-  private File getEditFile( int idx ) {
-    return fsimage.getEditFile( idx );
+  private File getEditFile(int idx) {
+    return fsimage.getEditFile(idx);
  }

-  private File getEditNewFile( int idx ) {
-    return fsimage.getEditNewFile( idx );
+  private File getEditNewFile(int idx) {
+    return fsimage.getEditNewFile(idx);
  }

  private int getNumStorageDirs() {
@@ -96,23 +96,23 @@ class FSEditLog {
   */
  void open() throws IOException {
    int size = getNumStorageDirs();
-    if( editStreams == null )
-      editStreams = new ArrayList<EditLogOutputStream>( size );
+    if (editStreams == null)
+      editStreams = new ArrayList<EditLogOutputStream>(size);
    for (int idx = 0; idx < size; idx++) {
-      File eFile = getEditFile( idx );
+      File eFile = getEditFile(idx);
      try {
-        EditLogOutputStream eStream = new EditLogOutputStream( eFile );
-        editStreams.add( eStream );
+        EditLogOutputStream eStream = new EditLogOutputStream(eFile);
+        editStreams.add(eStream);
      } catch (IOException e) {
-        FSNamesystem.LOG.warn( "Unable to open edit log file " + eFile );
+        FSNamesystem.LOG.warn("Unable to open edit log file " + eFile);
        processIOError(idx); 
        idx--; 
      }
    }
  }

-  void createEditLogFile( File name ) throws IOException {
-    EditLogOutputStream eStream = new EditLogOutputStream( name );
+  void createEditLogFile(File name) throws IOException {
+    EditLogOutputStream eStream = new EditLogOutputStream(name);
    eStream.create();
    eStream.flushAndSync();
    eStream.close();
@@ -123,9 +123,9 @@ class FSEditLog {
   */
  void createNewIfMissing() throws IOException {
    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      File newFile = getEditNewFile( idx );
-      if( ! newFile.exists() )
-        createEditLogFile( newFile );
+      File newFile = getEditNewFile(idx);
+      if (!newFile.exists())
+        createEditLogFile(newFile);
    }
  }

@@ -137,7 +137,7 @@ class FSEditLog {
      return;
    }
    for (int idx = 0; idx < editStreams.size(); idx++) {
-      EditLogOutputStream eStream = editStreams.get( idx );
+      EditLogOutputStream eStream = editStreams.get(idx);
      try {
        eStream.flushAndSync();
        eStream.close();
@@ -162,7 +162,7 @@ class FSEditLog {
    assert(index < getNumStorageDirs());
    assert(getNumStorageDirs() == editStreams.size());

-    editStreams.remove( index );
+    editStreams.remove(index);
    //
    // Invoke the ioerror routine of the fsimage
    //
@@ -174,7 +174,7 @@ class FSEditLog {
   */
  boolean existsNew() throws IOException {
    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      if (getEditNewFile( idx ).exists()) { 
+      if (getEditNewFile(idx).exists()) { 
        return true;
      }
    }
@@ -186,7 +186,7 @@ class FSEditLog {
   * This is where we apply edits that we've been writing to disk all
   * along.
   */
-  int loadFSEdits( File edits ) throws IOException {
+  int loadFSEdits(File edits) throws IOException {
    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
    FSDirectory fsDir = fsNamesys.dir;
    int numEdits = 0;
@@ -197,7 +197,7 @@ class FSEditLog {
                                               new BufferedInputStream(
                                                                       new FileInputStream(edits)));
      // Read log file version. Could be missing. 
-      in.mark( 4 );
+      in.mark(4);
      // If edits log is greater than 2G, available method will return negative
      // numbers, so we avoid having to call available
      boolean available = true;
@@ -208,16 +208,16 @@ class FSEditLog {
      }
      if (available) {
        in.reset();
-        if( logVersion >= 0 )
+        if (logVersion >= 0)
          logVersion = 0;
        else
          logVersion = in.readInt();
-        if( logVersion < FSConstants.LAYOUT_VERSION ) // future version
+        if (logVersion < FSConstants.LAYOUT_VERSION) // future version
          throw new IOException(
                                "Unexpected version of the file system log file: "
                                + logVersion
                                + ". Current version = " 
-                                + FSConstants.LAYOUT_VERSION + "." );
+                                + FSConstants.LAYOUT_VERSION + ".");
      }

      short replication = fsNamesys.getDefaultReplication();
@@ -236,20 +236,20 @@ class FSEditLog {
            ArrayWritable aw = null;
            Writable writables[];
            // version 0 does not support per file replication
-            if( logVersion >= 0 )
+            if (logVersion >= 0)
              name.readFields(in);  // read name only
            else {  // other versions do
              // get name and replication
              aw = new ArrayWritable(UTF8.class);
              aw.readFields(in);
              writables = aw.get(); 
-              if( writables.length != 2 )
+              if (writables.length != 2)
                throw new IOException("Incorrect data fortmat. " 
                                      + "Name & replication pair expected");
              name = (UTF8) writables[0];
              replication = Short.parseShort(
                                             ((UTF8)writables[1]).toString());
-              replication = adjustReplication( replication );
+              replication = adjustReplication(replication);
            }
            // get blocks
            aw = new ArrayWritable(Block.class);
@@ -258,7 +258,7 @@ class FSEditLog {
            Block blocks[] = new Block[writables.length];
            System.arraycopy(writables, 0, blocks, 0, blocks.length);
            // add to the file tree
-            fsDir.unprotectedAddFile(name, blocks, replication );
+            fsDir.unprotectedAddFile(name, blocks, replication);
            break;
          }
          case OP_SET_REPLICATION: {
@@ -266,7 +266,7 @@ class FSEditLog {
            UTF8 repl = new UTF8();
            src.readFields(in);
            repl.readFields(in);
-            replication = adjustReplication( fromLogReplication(repl) );
+            replication = adjustReplication(fromLogReplication(repl));
            fsDir.unprotectedSetReplication(src.toString(), 
                                            replication,
                                            null);
@@ -293,26 +293,26 @@ class FSEditLog {
            break;
          }
          case OP_DATANODE_ADD: {
-            if( logVersion > -3 )
+            if (logVersion > -3)
              throw new IOException("Unexpected opcode " + opcode 
-                                    + " for version " + logVersion );
+                                    + " for version " + logVersion);
            FSImage.DatanodeImage nodeimage = new FSImage.DatanodeImage();
            nodeimage.readFields(in);
            DatanodeDescriptor node = nodeimage.getDatanodeDescriptor();
-            fsNamesys.unprotectedAddDatanode( node );
+            fsNamesys.unprotectedAddDatanode(node);
            break;
          }
          case OP_DATANODE_REMOVE: {
-            if( logVersion > -3 )
+            if (logVersion > -3)
              throw new IOException("Unexpected opcode " + opcode 
-                                    + " for version " + logVersion );
+                                    + " for version " + logVersion);
            DatanodeID nodeID = new DatanodeID();
            nodeID.readFields(in);
-            DatanodeDescriptor node = fsNamesys.getDatanode( nodeID );
-            if( node != null ) {
-              fsNamesys.unprotectedRemoveDatanode( node );
+            DatanodeDescriptor node = fsNamesys.getDatanode(nodeID);
+            if (node != null) {
+              fsNamesys.unprotectedRemoveDatanode(node);
              // physically remove node from datanodeMap
-              fsNamesys.wipeDatanode( nodeID );
+              fsNamesys.wipeDatanode(nodeID);
            }
            break;
          }
@@ -326,19 +326,19 @@ class FSEditLog {
      }
    }

-    if( logVersion != FSConstants.LAYOUT_VERSION ) // other version
+    if (logVersion != FSConstants.LAYOUT_VERSION) // other version
      numEdits++; // save this image asap
    return numEdits;
  }

-  static short adjustReplication( short replication) {
+  static short adjustReplication(short replication) {
    FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
    short minReplication = fsNamesys.getMinReplication();
-    if( replication<minReplication ) {
+    if (replication<minReplication) {
      replication = minReplication;
    }
    short maxReplication = fsNamesys.getMaxReplication();
-    if( replication>maxReplication ) {
+    if (replication>maxReplication) {
      replication = maxReplication;
    }
    return replication;
@@ -351,14 +351,14 @@ class FSEditLog {
    assert this.getNumEditStreams() > 0 : "no editlog streams";
    for (int idx = 0; idx < editStreams.size(); idx++) {
      EditLogOutputStream eStream;
-      synchronized ( eStream = editStreams.get( idx ) ) {
+      synchronized (eStream = editStreams.get(idx)) {
        try {
          eStream.write(op);
          if (w1 != null) {
-            w1.write( eStream );
+            w1.write(eStream);
          }
          if (w2 != null) {
-            w2.write( eStream );
+            w2.write(eStream);
          }
          eStream.flushAndSync();
        } catch (IOException ie) {
@@ -377,43 +377,43 @@ class FSEditLog {
  /** 
   * Add create file record to edit log
   */
-  void logCreateFile( FSDirectory.INode newNode ) {
+  void logCreateFile(FSDirectory.INode newNode) {
    UTF8 nameReplicationPair[] = new UTF8[] { 
-      new UTF8( newNode.computeName() ), 
-      FSEditLog.toLogReplication( newNode.getReplication() )};
+      new UTF8(newNode.computeName()), 
+      FSEditLog.toLogReplication(newNode.getReplication())};
    logEdit(OP_ADD,
-            new ArrayWritable( UTF8.class, nameReplicationPair ), 
-            new ArrayWritable( Block.class, newNode.getBlocks() ));
+            new ArrayWritable(UTF8.class, nameReplicationPair), 
+            new ArrayWritable(Block.class, newNode.getBlocks()));
  }

  /** 
   * Add create directory record to edit log
   */
-  void logMkDir( FSDirectory.INode newNode ) {
-    logEdit(OP_MKDIR, new UTF8( newNode.computeName() ), null );
+  void logMkDir(FSDirectory.INode newNode) {
+    logEdit(OP_MKDIR, new UTF8(newNode.computeName()), null);
  }

  /** 
   * Add rename record to edit log
   * TODO: use String parameters until just before writing to disk
   */
-  void logRename( UTF8 src, UTF8 dst ) {
+  void logRename(UTF8 src, UTF8 dst) {
    logEdit(OP_RENAME, src, dst);
  }

  /** 
   * Add set replication record to edit log
   */
-  void logSetReplication( String src, short replication ) {
+  void logSetReplication(String src, short replication) {
    logEdit(OP_SET_REPLICATION, 
            new UTF8(src), 
-            FSEditLog.toLogReplication( replication ));
+            FSEditLog.toLogReplication(replication));
  }

  /** 
   * Add delete file record to edit log
   */
-  void logDelete( UTF8 src ) {
+  void logDelete(UTF8 src) {
    logEdit(OP_DELETE, src, null);
  }

@@ -421,23 +421,23 @@ class FSEditLog {
   * Creates a record in edit log corresponding to a new data node
   * registration event.
   */
-  void logAddDatanode( DatanodeDescriptor node ) {
-    logEdit( OP_DATANODE_ADD, new FSImage.DatanodeImage(node), null );
+  void logAddDatanode(DatanodeDescriptor node) {
+    logEdit(OP_DATANODE_ADD, new FSImage.DatanodeImage(node), null);
  }

  /** 
   * Creates a record in edit log corresponding to a data node
   * removal event.
   */
-  void logRemoveDatanode( DatanodeID nodeID ) {
-    logEdit( OP_DATANODE_REMOVE, new DatanodeID( nodeID ), null );
+  void logRemoveDatanode(DatanodeID nodeID) {
+    logEdit(OP_DATANODE_REMOVE, new DatanodeID(nodeID), null);
  }

-  static UTF8 toLogReplication( short replication ) {
-    return new UTF8( Short.toString(replication));
+  static UTF8 toLogReplication(short replication) {
+    return new UTF8(Short.toString(replication));
  }

-  static short fromLogReplication( UTF8 replication ) {
+  static short fromLogReplication(UTF8 replication) {
    return Short.parseShort(replication.toString());
  }

@@ -448,9 +448,9 @@ class FSEditLog {
    assert(getNumStorageDirs() == editStreams.size());
    long size = 0;
    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      synchronized (editStreams.get( idx )) {
-        assert(size == 0 || size == getEditFile( idx ).length());
-        size = getEditFile( idx ).length();
+      synchronized (editStreams.get(idx)) {
+        assert(size == 0 || size == getEditFile(idx).length());
+        size = getEditFile(idx).length();
      }
    }
    return size;
@@ -472,11 +472,11 @@ class FSEditLog {
    //
    // Open edits.new
    //
-    for (int idx = 0; idx < getNumStorageDirs(); idx++ ) {
+    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
      try {
-        EditLogOutputStream eStream = new EditLogOutputStream( getEditNewFile( idx ));
+        EditLogOutputStream eStream = new EditLogOutputStream(getEditNewFile(idx));
        eStream.create();
-        editStreams.add( eStream );
+        editStreams.add(eStream);
      } catch (IOException e) {
        processIOError(idx);
        idx--;
@@ -501,14 +501,14 @@ class FSEditLog {
    //
    // Delete edits and rename edits.new to edits.
    //
-    for (int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      if (!getEditNewFile( idx ).renameTo(getEditFile( idx ))) {
+    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
+      if (!getEditNewFile(idx).renameTo(getEditFile(idx))) {
        //
        // renameTo() fails on Windows if the destination
        // file exists.
        //
-        getEditFile( idx ).delete();
-        if (!getEditNewFile( idx ).renameTo(getEditFile( idx ))) {
+        getEditFile(idx).delete();
+        if (!getEditNewFile(idx).renameTo(getEditFile(idx))) {
          processIOError(idx); 
          idx--; 
        }
@@ -524,6 +524,6 @@ class FSEditLog {
   * Return the name of the edit file
   */
  File getFsEditName() throws IOException {
-    return getEditFile( 0 );
+    return getEditFile(0);
  }
}

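One piece of logic worth isolating from the FSEditLog hunks: adjustReplication clamps a replication factor read from an edit record into the configured [min, max] range before applying it. A standalone rendering of that clamp; the min/max constants below are placeholders, not dfs defaults:

// Standalone rendering of the adjustReplication clamp seen above;
// MIN_REPLICATION and MAX_REPLICATION are placeholder values.
class ReplicationClampDemo {
  static final short MIN_REPLICATION = 1;
  static final short MAX_REPLICATION = 512;

  static short adjustReplication(short replication) {
    if (replication < MIN_REPLICATION) {
      replication = MIN_REPLICATION;
    }
    if (replication > MAX_REPLICATION) {
      replication = MAX_REPLICATION;
    }
    return replication;
  }

  public static void main(String[] args) {
    System.out.println(adjustReplication((short) 0));    // 1
    System.out.println(adjustReplication((short) 3));    // 3
    System.out.println(adjustReplication((short) 9999)); // 512
  }
}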
+ 235 - 235
src/java/org/apache/hadoop/dfs/FSImage.java

@@ -59,7 +59,7 @@ class FSImage extends Storage {
     EDITS_NEW ("edits.new");
     EDITS_NEW ("edits.new");
     
     
     private String fileName = null;
     private String fileName = null;
-    private NameNodeFile( String name ) {this.fileName = name;}
+    private NameNodeFile(String name) {this.fileName = name;}
     String getName() {return fileName;}
     String getName() {return fileName;}
   }
   }
   
   
@@ -70,53 +70,53 @@ class FSImage extends Storage {
   /**
   /**
    */
    */
   FSImage() {
   FSImage() {
-    super( NodeType.NAME_NODE );
-    this.editLog = new FSEditLog( this );
+    super(NodeType.NAME_NODE);
+    this.editLog = new FSEditLog(this);
   }
   }
 
 
   /**
   /**
    */
    */
-  FSImage( Collection<File> fsDirs ) throws IOException {
+  FSImage(Collection<File> fsDirs) throws IOException {
     this();
     this();
-    setStorageDirectories( fsDirs );
+    setStorageDirectories(fsDirs);
   }
   }
 
 
-  FSImage( StorageInfo storageInfo ) {
-    super( NodeType.NAME_NODE, storageInfo );
+  FSImage(StorageInfo storageInfo) {
+    super(NodeType.NAME_NODE, storageInfo);
   }
   }
 
 
   /**
   /**
    * Represents an Image (image and edit file).
    * Represents an Image (image and edit file).
    */
    */
-  FSImage( File imageDir ) throws IOException {
+  FSImage(File imageDir) throws IOException {
     this();
     this();
     ArrayList<File> dirs = new ArrayList<File>(1);
     ArrayList<File> dirs = new ArrayList<File>(1);
-    dirs.add( imageDir );
-    setStorageDirectories( dirs );
+    dirs.add(imageDir);
+    setStorageDirectories(dirs);
   }
   }
   
   
-  void setStorageDirectories( Collection<File> fsDirs ) throws IOException {
-    this.storageDirs = new ArrayList<StorageDirectory>( fsDirs.size() );
-    for( Iterator<File> it = fsDirs.iterator(); it.hasNext(); )
-      this.addStorageDir( new StorageDirectory( it.next() ));
+  void setStorageDirectories(Collection<File> fsDirs) throws IOException {
+    this.storageDirs = new ArrayList<StorageDirectory>(fsDirs.size());
+    for(Iterator<File> it = fsDirs.iterator(); it.hasNext();)
+      this.addStorageDir(new StorageDirectory(it.next()));
   }
   }
 
 
   /**
   /**
    */
    */
-  File getImageFile( int imageDirIdx, NameNodeFile type ) {
-    return getImageFile( getStorageDir( imageDirIdx ), type );
+  File getImageFile(int imageDirIdx, NameNodeFile type) {
+    return getImageFile(getStorageDir(imageDirIdx), type);
   }
   }
   
   
-  static File getImageFile( StorageDirectory sd, NameNodeFile type ) {
-    return new File( sd.getCurrentDir(), type.getName() );
+  static File getImageFile(StorageDirectory sd, NameNodeFile type) {
+    return new File(sd.getCurrentDir(), type.getName());
   }
   }
   
   
-  File getEditFile( int idx ) {
-    return getImageFile( idx, NameNodeFile.EDITS );
+  File getEditFile(int idx) {
+    return getImageFile(idx, NameNodeFile.EDITS);
   }
   }
   
   
-  File getEditNewFile( int idx ) {
-    return getImageFile( idx, NameNodeFile.EDITS_NEW );
+  File getEditNewFile(int idx) {
+    return getImageFile(idx, NameNodeFile.EDITS_NEW);
   }
   }
   
   
   /**
   /**
@@ -129,42 +129,42 @@ class FSImage extends Storage {
    * @param startOpt startup option
    * @param startOpt startup option
    * @throws IOException
    * @throws IOException
    */
    */
-  void recoverTransitionRead( Collection<File> dataDirs,
-                              StartupOption startOpt
-                              ) throws IOException {
+  void recoverTransitionRead(Collection<File> dataDirs,
+                             StartupOption startOpt
+                             ) throws IOException {
     assert startOpt != StartupOption.FORMAT : 
     assert startOpt != StartupOption.FORMAT : 
       "NameNode formatting should be performed before reading the image";
       "NameNode formatting should be performed before reading the image";
     // 1. For each data directory calculate its state and 
     // 1. For each data directory calculate its state and 
     // check whether all is consistent before transitioning.
     // check whether all is consistent before transitioning.
-    this.storageDirs = new ArrayList<StorageDirectory>( dataDirs.size() );
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
     AbstractList<StorageState> dataDirStates = 
     AbstractList<StorageState> dataDirStates = 
-      new ArrayList<StorageState>( dataDirs.size() );
+      new ArrayList<StorageState>(dataDirs.size());
     boolean isFormatted = false;
     boolean isFormatted = false;
-    for( Iterator<File> it = dataDirs.iterator(); it.hasNext(); ) {
+    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
       File dataDir = it.next();
-      StorageDirectory sd = new StorageDirectory( dataDir );
+      StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       StorageState curState;
       try {
       try {
-        curState = sd.analyzeStorage( startOpt );
+        curState = sd.analyzeStorage(startOpt);
         // sd is locked but not opened
         // sd is locked but not opened
-        switch( curState ) {
+        switch(curState) {
         case NON_EXISTENT:
         case NON_EXISTENT:
           // name-node fails if any of the configured storage dirs are missing
           // name-node fails if any of the configured storage dirs are missing
-          throw new InconsistentFSStateException( sd.root,
-                                                  "storage directory does not exist or is not accessible." );
+          throw new InconsistentFSStateException(sd.root,
+                                                 "storage directory does not exist or is not accessible.");
         case NOT_FORMATTED:
         case NOT_FORMATTED:
           break;
           break;
         case CONVERT:
         case CONVERT:
-          if( convertLayout( sd ) ) // need to reformat empty image
+          if (convertLayout(sd)) // need to reformat empty image
             curState = StorageState.NOT_FORMATTED;
             curState = StorageState.NOT_FORMATTED;
           break;
           break;
         case NORMAL:
         case NORMAL:
           break;
           break;
         default:  // recovery is possible
         default:  // recovery is possible
-          sd.doRecover( curState );      
+          sd.doRecover(curState);      
         }
         }
-        if( curState != StorageState.NOT_FORMATTED 
-            && startOpt != StartupOption.ROLLBACK ) {
+        if (curState != StorageState.NOT_FORMATTED 
+            && startOpt != StartupOption.ROLLBACK) {
           sd.read(); // read and verify consistency with other directories
           sd.read(); // read and verify consistency with other directories
           isFormatted = true;
           isFormatted = true;
         }
         }
@@ -173,34 +173,34 @@ class FSImage extends Storage {
         throw ioe;
         throw ioe;
       }
       }
       // add to the storage list
       // add to the storage list
-      addStorageDir( sd );
-      dataDirStates.add( curState );
+      addStorageDir(sd);
+      dataDirStates.add(curState);
     }
     }
 
 
-    if( dataDirs.size() == 0 )  // none of the data dirs exist
-      throw new IOException( 
-                            "All specified directories are not accessible or do not exist." );
-    if( ! isFormatted && startOpt != StartupOption.ROLLBACK )
-      throw new IOException( "NameNode is not formatted." );
-    if( startOpt != StartupOption.UPGRADE
+    if (dataDirs.size() == 0)  // none of the data dirs exist
+      throw new IOException(
+                            "All specified directories are not accessible or do not exist.");
+    if (!isFormatted && startOpt != StartupOption.ROLLBACK)
+      throw new IOException("NameNode is not formatted.");
+    if (startOpt != StartupOption.UPGRADE
         && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION
         && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION
-        && layoutVersion != FSConstants.LAYOUT_VERSION )
-      throw new IOException( 
+        && layoutVersion != FSConstants.LAYOUT_VERSION)
+      throw new IOException(
                             "\nFile system image contains an old layout version " + layoutVersion
                             "\nFile system image contains an old layout version " + layoutVersion
                             + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
                             + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
-                            + " is required.\nPlease restart NameNode with -upgrade option." );
+                            + " is required.\nPlease restart NameNode with -upgrade option.");
 
 
     // 2. Format unformatted dirs.
     // 2. Format unformatted dirs.
     this.checkpointTime = 0L;
     this.checkpointTime = 0L;
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      StorageState curState = dataDirStates.get( idx );
-      switch( curState ) {
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      StorageState curState = dataDirStates.get(idx);
+      switch(curState) {
       case NON_EXISTENT:
       case NON_EXISTENT:
         assert false : StorageState.NON_EXISTENT + " state cannot be here";
         assert false : StorageState.NON_EXISTENT + " state cannot be here";
       case NOT_FORMATTED:
       case NOT_FORMATTED:
-        LOG.info( "Storage directory " + sd.root + " is not formatted." );
-        LOG.info( "Formatting ..." );
+        LOG.info("Storage directory " + sd.root + " is not formatted.");
+        LOG.info("Formatting ...");
         sd.clearDirectory(); // create empty currrent dir
         sd.clearDirectory(); // create empty currrent dir
         break;
         break;
       default:
       default:
@@ -209,7 +209,7 @@ class FSImage extends Storage {
     }
     }
 
 
     // 3. Do transitions
     // 3. Do transitions
-    switch( startOpt ) {
+    switch(startOpt) {
     case UPGRADE:
     case UPGRADE:
       doUpgrade();
       doUpgrade();
       break;
       break;
@@ -217,7 +217,7 @@ class FSImage extends Storage {
       doRollback();
       doRollback();
       // and now load that image
       // and now load that image
     case REGULAR:
     case REGULAR:
-      if( loadFSImage() )
+      if (loadFSImage())
         saveFSImage();
         saveFSImage();
       else
       else
         editLog.open();
         editLog.open();
@@ -229,12 +229,12 @@ class FSImage extends Storage {
   private void doUpgrade() throws IOException {
   private void doUpgrade() throws IOException {
     // Upgrade is allowed only if there are 
     // Upgrade is allowed only if there are 
     // no previous fs states in any of the directories
     // no previous fs states in any of the directories
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      if( sd.getPreviousDir().exists() )
-        throw new InconsistentFSStateException( sd.root,
-                                                "previous fs state should not exist during upgrade. "
-                                                + "Finalize or rollback first." );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      if (sd.getPreviousDir().exists())
+        throw new InconsistentFSStateException(sd.root,
+                                               "previous fs state should not exist during upgrade. "
+                                               + "Finalize or rollback first.");
     }
     }
 
 
     // load the latest image
     // load the latest image
@@ -246,32 +246,32 @@ class FSImage extends Storage {
     int oldLV = this.getLayoutVersion();
     int oldLV = this.getLayoutVersion();
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.checkpointTime = FSNamesystem.now();
     this.checkpointTime = FSNamesystem.now();
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      LOG.info( "Upgrading image directory " + sd.root 
-                + ".\n   old LV = " + oldLV
-                + "; old CTime = " + oldCTime
-                + ".\n   new LV = " + this.getLayoutVersion()
-                + "; new CTime = " + this.getCTime() );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      LOG.info("Upgrading image directory " + sd.root 
+               + ".\n   old LV = " + oldLV
+               + "; old CTime = " + oldCTime
+               + ".\n   new LV = " + this.getLayoutVersion()
+               + "; new CTime = " + this.getCTime());
       File curDir = sd.getCurrentDir();
       File curDir = sd.getCurrentDir();
       File prevDir = sd.getPreviousDir();
       File prevDir = sd.getPreviousDir();
       File tmpDir = sd.getPreviousTmp();
       File tmpDir = sd.getPreviousTmp();
       assert curDir.exists() : "Current directory must exist.";
       assert curDir.exists() : "Current directory must exist.";
-      assert ! prevDir.exists() : "prvious directory must not exist.";
-      assert ! tmpDir.exists() : "prvious.tmp directory must not exist.";
+      assert !prevDir.exists() : "prvious directory must not exist.";
+      assert !tmpDir.exists() : "prvious.tmp directory must not exist.";
       // rename current to tmp
       // rename current to tmp
-      rename( curDir, tmpDir );
+      rename(curDir, tmpDir);
       // save new image
       // save new image
-      if( ! curDir.mkdir() )
-        throw new IOException("Cannot create directory " + curDir );
-      saveFSImage( getImageFile( sd, NameNodeFile.IMAGE ));
-      editLog.createEditLogFile( getImageFile( sd, NameNodeFile.EDITS ));
+      if (!curDir.mkdir())
+        throw new IOException("Cannot create directory " + curDir);
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
       // write version and time files
       sd.write();
       // rename tmp to previous
-      rename( tmpDir, prevDir );
+      rename(tmpDir, prevDir);
       isUpgradeFinalized = false;
-      LOG.info( "Upgrade of " + sd.root + " is complete." );
+      LOG.info("Upgrade of " + sd.root + " is complete.");
     }
     editLog.open();
   }
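
The hunk above only reflows doUpgrade, but the sequence it formats is worth noting: the old state is parked in previous.tmp while the new image is written, so a crash at any point leaves either the old or the new state recoverable. A minimal standalone sketch of that rename pattern (all names here are hypothetical, not part of this patch):

    import java.io.File;
    import java.io.IOException;

    class UpgradeSketch {
      // Fail loudly if the move does not happen, as rename() above does.
      static void rename(File from, File to) throws IOException {
        if (!from.renameTo(to))
          throw new IOException("Cannot rename " + from + " to " + to);
      }

      // current -> previous.tmp, write fresh current, previous.tmp -> previous.
      static void upgrade(File root) throws IOException {
        File current = new File(root, "current");
        File previous = new File(root, "previous");
        File tmp = new File(root, "previous.tmp");
        rename(current, tmp);            // old state stays recoverable
        if (!current.mkdir())
          throw new IOException("Cannot create directory " + current);
        // ... save the new image and edit log into current here ...
        rename(tmp, previous);           // final rename marks the upgrade done
      }
    }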
@@ -283,91 +283,91 @@ class FSImage extends Storage {
     boolean canRollback = false;
     FSImage prevState = new FSImage();
     prevState.layoutVersion = FSConstants.LAYOUT_VERSION;
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
       File prevDir = sd.getPreviousDir();
-      if( ! prevDir.exists() ) {  // use current directory then
-        LOG.info( "Storage directory " + sd.root
-                  + " does not contain previous fs state." );
+      if (!prevDir.exists()) {  // use current directory then
+        LOG.info("Storage directory " + sd.root
+                 + " does not contain previous fs state.");
         sd.read(); // read and verify consistency with other directories
         continue;
       }
-      StorageDirectory sdPrev = prevState.new StorageDirectory( sd.root );
-      sdPrev.read( sdPrev.getPreviousVersionFile() );  // read and verify consistency of the prev dir
+      StorageDirectory sdPrev = prevState.new StorageDirectory(sd.root);
+      sdPrev.read(sdPrev.getPreviousVersionFile());  // read and verify consistency of the prev dir
       canRollback = true;
     }
-    if( ! canRollback )
-      throw new IOException( "Cannot rollback. " 
-                             + "None of the storage directories contain previous fs state." );
+    if (!canRollback)
+      throw new IOException("Cannot rollback. " 
+                            + "None of the storage directories contain previous fs state.");
 
 
     // Now that we know all directories are going to be consistent
     // Now that we know all directories are going to be consistent
     // Do rollback for each directory containing previous state
     // Do rollback for each directory containing previous state
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
       File prevDir = sd.getPreviousDir();
-      if( ! prevDir.exists() )
+      if (!prevDir.exists())
         continue;
 
-      LOG.info( "Rolling back storage directory " + sd.root 
-                + ".\n   new LV = " + prevState.getLayoutVersion()
-                + "; new CTime = " + prevState.getCTime() );
+      LOG.info("Rolling back storage directory " + sd.root 
+               + ".\n   new LV = " + prevState.getLayoutVersion()
+               + "; new CTime = " + prevState.getCTime());
       File tmpDir = sd.getRemovedTmp();
-      assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
+      assert !tmpDir.exists() : "removed.tmp directory must not exist.";
       // rename current to tmp
       File curDir = sd.getCurrentDir();
       assert curDir.exists() : "Current directory must exist.";
-      rename( curDir, tmpDir );
+      rename(curDir, tmpDir);
       // rename previous to current
-      rename( prevDir, curDir );
+      rename(prevDir, curDir);
 
       // delete tmp dir
-      deleteDir( tmpDir );
-      LOG.info( "Rollback of " + sd.root + " is complete." );
+      deleteDir(tmpDir);
+      LOG.info("Rollback of " + sd.root + " is complete.");
     }
     isUpgradeFinalized = true;
   }
 
-  private void doFinalize( StorageDirectory sd ) throws IOException {
+  private void doFinalize(StorageDirectory sd) throws IOException {
     File prevDir = sd.getPreviousDir();
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
       return; // already discarded
-    LOG.info( "Finalizing upgrade for storage directory " 
-              + sd.root 
-              + ".\n   cur LV = " + this.getLayoutVersion()
-              + "; cur CTime = " + this.getCTime() );
+    LOG.info("Finalizing upgrade for storage directory " 
+             + sd.root 
+             + ".\n   cur LV = " + this.getLayoutVersion()
+             + "; cur CTime = " + this.getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();
     // rename previous to tmp and remove
-    rename( prevDir, tmpDir );
-    deleteDir( tmpDir );
+    rename(prevDir, tmpDir);
+    deleteDir(tmpDir);
     isUpgradeFinalized = true;
-    LOG.info( "Finalize upgrade for " + sd.root + " is complete." );
+    LOG.info("Finalize upgrade for " + sd.root + " is complete.");
   }
 
   void finalizeUpgrade() throws IOException {
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ )
-      doFinalize( getStorageDir( idx ));
+    for(int idx = 0; idx < getNumStorageDirs(); idx++)
+      doFinalize(getStorageDir(idx));
   }
 
   boolean isUpgradeFinalized() {
     return isUpgradeFinalized;
   }
 
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.getFields( props, sd );
-    if( layoutVersion == 0 )
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.getFields(props, sd);
+    if (layoutVersion == 0)
       throw new IOException("NameNode directory " 
       throw new IOException("NameNode directory " 
-                            + sd.root + " is not formatted." );
-    this.checkpointTime = readCheckpointTime( sd );
+                            + sd.root + " is not formatted.");
+    this.checkpointTime = readCheckpointTime(sd);
   }
 
-  long readCheckpointTime( StorageDirectory sd ) throws IOException {
-    File timeFile = getImageFile( sd, NameNodeFile.TIME );
+  long readCheckpointTime(StorageDirectory sd) throws IOException {
+    File timeFile = getImageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if( timeFile.exists() && timeFile.canRead() ) {
-      DataInputStream in = new DataInputStream( new FileInputStream(timeFile) );
+    if (timeFile.exists() && timeFile.canRead()) {
+      DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();
       } finally {
@@ -387,11 +387,11 @@ class FSImage extends Storage {
    * @param sd storage directory
    * @throws IOException
    */
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.setFields( props, sd );
-    writeCheckpointTime( sd );
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.setFields(props, sd);
+    writeCheckpointTime(sd);
   }
 
   /**
@@ -400,15 +400,15 @@ class FSImage extends Storage {
    * @param sd
    * @throws IOException
    */
-  void writeCheckpointTime( StorageDirectory sd ) throws IOException {
-    if( checkpointTime < 0L )
+  void writeCheckpointTime(StorageDirectory sd) throws IOException {
+    if (checkpointTime < 0L)
       return; // do not write negative time
-    File timeFile = getImageFile( sd, NameNodeFile.TIME );
+    File timeFile = getImageFile(sd, NameNodeFile.TIME);
     if (timeFile.exists()) { timeFile.delete(); }
     DataOutputStream out = new DataOutputStream(
                                                 new FileOutputStream(timeFile));
     try {
-      out.writeLong( checkpointTime );
+      out.writeLong(checkpointTime);
     } finally {
       out.close();
     }
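
writeCheckpointTime and readCheckpointTime form a simple round trip: a single long written with DataOutputStream and read back with DataInputStream, with a missing or unreadable file defaulting to 0. A condensed sketch of that round trip (file name hypothetical):

    import java.io.*;

    class CheckpointTimeSketch {
      static void write(File f, long time) throws IOException {
        DataOutputStream out = new DataOutputStream(new FileOutputStream(f));
        try {
          out.writeLong(time);           // same format readCheckpointTime expects
        } finally {
          out.close();
        }
      }

      static long read(File f) throws IOException {
        if (!f.exists() || !f.canRead())
          return 0L;                     // default when no time file is present
        DataInputStream in = new DataInputStream(new FileInputStream(f));
        try {
          return in.readLong();
        } finally {
          in.close();
        }
      }
    }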
@@ -422,41 +422,41 @@ class FSImage extends Storage {
    */
   void processIOError(int index) throws IOException {
     int nrDirs = getNumStorageDirs();
-    assert( index >= 0 && index < nrDirs );
-    if( nrDirs == 1 )
+    assert(index >= 0 && index < nrDirs);
+    if (nrDirs == 1)
       throw new IOException("Checkpoint directories inaccessible.");
       throw new IOException("Checkpoint directories inaccessible.");
-    storageDirs.remove( index );
+    storageDirs.remove(index);
   }
 
   FSEditLog getEditLog() {
     return editLog;
   }
 
-  boolean isConversionNeeded( StorageDirectory sd ) throws IOException {
-    File oldImageDir = new File( sd.root, "image" );
-    if( ! oldImageDir.exists() )
+  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+    File oldImageDir = new File(sd.root, "image");
+    if (!oldImageDir.exists())
       return false;
     // check consistency of the old storage
-    if( ! oldImageDir.isDirectory() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldImageDir + " is not a directory." );
-    if( ! oldImageDir.canWrite() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldImageDir + " is not writable." );
+    if (!oldImageDir.isDirectory())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldImageDir + " is not a directory.");
+    if (!oldImageDir.canWrite())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldImageDir + " is not writable.");
     return true;
   }
   
-  private boolean convertLayout( StorageDirectory sd ) throws IOException {
+  private boolean convertLayout(StorageDirectory sd) throws IOException {
     assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
       "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
-    File oldImageDir = new File( sd.root, "image" );
+    File oldImageDir = new File(sd.root, "image");
     assert oldImageDir.exists() : "Old image directory is missing";
-    File oldImage = new File( oldImageDir, "fsimage" );
+    File oldImage = new File(oldImageDir, "fsimage");
     
-    LOG.info( "Old layout version directory " + oldImageDir
-              + " is found. New layout version is "
-              + FSConstants.LAYOUT_VERSION );
-    LOG.info( "Trying to convert ..." );
+    LOG.info("Old layout version directory " + oldImageDir
+             + " is found. New layout version is "
+             + FSConstants.LAYOUT_VERSION);
+    LOG.info("Trying to convert ...");
 
 
     // we did not use locking for the pre upgrade layout, so we cannot prevent 
     // we did not use locking for the pre upgrade layout, so we cannot prevent 
     // old name-nodes from running in the same directory as the new ones
     // old name-nodes from running in the same directory as the new ones
@@ -464,35 +464,35 @@ class FSImage extends Storage {
     // check new storage
     File newImageDir = sd.getCurrentDir();
     File versionF = sd.getVersionFile();
-    if( versionF.exists() )
-      throw new IOException( "Version file already exists: " + versionF );
-    if( newImageDir.exists() ) // // somebody created current dir manually
-      deleteDir( newImageDir );
+    if (versionF.exists())
+      throw new IOException("Version file already exists: " + versionF);
+    if (newImageDir.exists()) // // somebody created current dir manually
+      deleteDir(newImageDir);
 
     // move old image files into new location
-    rename( oldImageDir, newImageDir );
-    File oldEdits1 = new File( sd.root, "edits" );
+    rename(oldImageDir, newImageDir);
+    File oldEdits1 = new File(sd.root, "edits");
     // move old edits into data
-    if( oldEdits1.exists() )
-      rename( oldEdits1, getImageFile( sd, NameNodeFile.EDITS ));
-    File oldEdits2 = new File( sd.root, "edits.new" );
-    if( oldEdits2.exists() )
-      rename( oldEdits2, getImageFile( sd, NameNodeFile.EDITS_NEW ));
+    if (oldEdits1.exists())
+      rename(oldEdits1, getImageFile(sd, NameNodeFile.EDITS));
+    File oldEdits2 = new File(sd.root, "edits.new");
+    if (oldEdits2.exists())
+      rename(oldEdits2, getImageFile(sd, NameNodeFile.EDITS_NEW));
 
     // Write new layout with 
     // setting layoutVersion = LAST_PRE_UPGRADE_LAYOUT_VERSION
     // means the actual version should be obtained from the image file
     this.layoutVersion = LAST_PRE_UPGRADE_LAYOUT_VERSION;
-    File newImageFile = getImageFile( sd, NameNodeFile.IMAGE );
+    File newImageFile = getImageFile(sd, NameNodeFile.IMAGE);
     boolean needReformat = false;
-    if( ! newImageFile.exists() ) {
+    if (!newImageFile.exists()) {
       // in pre upgrade versions image file was allowed not to exist
       // we treat it as non formatted then
-      LOG.info( "Old image file " + oldImage + " does not exist. " );
+      LOG.info("Old image file " + oldImage + " does not exist. ");
       needReformat = true;
     } else {
       sd.write();
-      LOG.info( "Conversion of " + oldImage + " is complete." );
+      LOG.info("Conversion of " + oldImage + " is complete.");
     }
     return needReformat;
   }
@@ -500,15 +500,15 @@ class FSImage extends Storage {
   //
   // Atomic move sequence, to recover from interrupted checkpoint
   //
-  void recoverInterruptedCheckpoint( StorageDirectory sd ) throws IOException {
-    File curFile = getImageFile( sd, NameNodeFile.IMAGE );
-    File ckptFile = getImageFile( sd, NameNodeFile.IMAGE_NEW );
+  void recoverInterruptedCheckpoint(StorageDirectory sd) throws IOException {
+    File curFile = getImageFile(sd, NameNodeFile.IMAGE);
+    File ckptFile = getImageFile(sd, NameNodeFile.IMAGE_NEW);
 
     //
     // If we were in the midst of a checkpoint
     //
     if (ckptFile.exists()) {
-      if (getImageFile( sd, NameNodeFile.EDITS_NEW ).exists()) {
+      if (getImageFile(sd, NameNodeFile.EDITS_NEW).exists()) {
         //
         // checkpointing might have uploaded a new
         // merged image, but we discard it here because we are
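
The context is cut short here, but the visible rule is that a leftover fsimage.ckpt found next to edits.new means the checkpoint never completed, so the half-finished image cannot be trusted. A hedged sketch of that branch only (helper name hypothetical; the full method also handles the case where edits.new is absent):

    import java.io.File;
    import java.io.IOException;

    class RecoverySketch {
      static void discardStaleCheckpoint(File ckpt, File editsNew)
          throws IOException {
        // ckpt + edits.new together imply an interrupted checkpoint:
        // the merged image is incomplete, so drop it.
        if (ckpt.exists() && editsNew.exists() && !ckpt.delete())
          throw new IOException("Unable to delete " + ckpt);
      }
    }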
@@ -552,23 +552,23 @@ class FSImage extends Storage {
     boolean needToSave = false;
     isUpgradeFinalized = true;
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      StorageDirectory sd = getStorageDir( idx );
-      recoverInterruptedCheckpoint( sd );
-      if( ! sd.getVersionFile().exists() ) {
+      StorageDirectory sd = getStorageDir(idx);
+      recoverInterruptedCheckpoint(sd);
+      if (!sd.getVersionFile().exists()) {
         needToSave |= true;
         continue; // some of them might have just been formatted
       }
-      assert getImageFile( sd, NameNodeFile.IMAGE ).exists() :
+      assert getImageFile(sd, NameNodeFile.IMAGE).exists() :
         "Image file must exist.";
         "Image file must exist.";
-      checkpointTime = readCheckpointTime( sd );
-      if( latestCheckpointTime < checkpointTime ) {
+      checkpointTime = readCheckpointTime(sd);
+      if (latestCheckpointTime < checkpointTime) {
         latestCheckpointTime = checkpointTime;
         latestSD = sd;
       }
-      if( checkpointTime <= 0L )
+      if (checkpointTime <= 0L)
         needToSave |= true;
       // set finalized flag
-      isUpgradeFinalized &= ! sd.getPreviousDir().exists();
+      isUpgradeFinalized &= !sd.getPreviousDir().exists();
     }
     assert latestSD != null : "Latest storage directory was not determined.";
 
@@ -576,13 +576,13 @@ class FSImage extends Storage {
     // Load in bits
     //
     latestSD.read();
-    needToSave |= loadFSImage( getImageFile( latestSD, NameNodeFile.IMAGE ));
+    needToSave |= loadFSImage(getImageFile(latestSD, NameNodeFile.IMAGE));
 
     //
     // read in the editlog from the same directory from
     // which we read in the image
     //
-    needToSave |= ( loadFSEdits( latestSD ) > 0 );
+    needToSave |= (loadFSEdits(latestSD) > 0);
 
     return needToSave;
   }
@@ -592,7 +592,7 @@ class FSImage extends Storage {
    * filenames and blocks.  Return whether we should
    * "re-save" and consolidate the edit-logs
    */
-  boolean loadFSImage( File curFile ) throws IOException {
+  boolean loadFSImage(File curFile) throws IOException {
     assert this.getLayoutVersion() < 0 : "Negative layout version is expected.";
     assert curFile != null : "curFile is null";
 
@@ -613,13 +613,13 @@ class FSImage extends Storage {
       // read image version: first appeared in version -1
       imgVersion = in.readInt();
       // read namespaceID: first appeared in version -2
-      if( imgVersion <= -2 )
+      if (imgVersion <= -2)
         this.namespaceID = in.readInt();
       // read number of files
       int numFiles = 0;
       // version 0 does not store version #
       // starts directly with the number of files
-      if( imgVersion >= 0 ) {
+      if (imgVersion >= 0) {
         numFiles = imgVersion;
         imgVersion = 0;
       } else {
@@ -627,7 +627,7 @@ class FSImage extends Storage {
       }
       this.layoutVersion = imgVersion;
 
-      needToSave = ( imgVersion != FSConstants.LAYOUT_VERSION );
+      needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);
 
       // read file info
       short replication = FSNamesystem.getFSNamesystem().getDefaultReplication();
@@ -635,9 +635,9 @@ class FSImage extends Storage {
         UTF8 name = new UTF8();
         name.readFields(in);
         // version 0 does not support per file replication
-        if( !(imgVersion >= 0) ) {
+        if (!(imgVersion >= 0)) {
           replication = in.readShort(); // other versions do
-          replication = FSEditLog.adjustReplication( replication );
+          replication = FSEditLog.adjustReplication(replication);
         }
         int numBlocks = in.readInt();
         Block blocks[] = null;
@@ -648,11 +648,11 @@ class FSImage extends Storage {
             blocks[j].readFields(in);
           }
         }
-        fsDir.unprotectedAddFile(name, blocks, replication );
+        fsDir.unprotectedAddFile(name, blocks, replication);
       }
       
       // load datanode info
-      this.loadDatanodes( imgVersion, in );
+      this.loadDatanodes(imgVersion, in);
     } finally {
       in.close();
     }
@@ -667,19 +667,19 @@ class FSImage extends Storage {
    * @return number of edits loaded
    * @throws IOException
    */
-  int loadFSEdits( StorageDirectory sd ) throws IOException {
+  int loadFSEdits(StorageDirectory sd) throws IOException {
     int numEdits = 0;
-    numEdits = editLog.loadFSEdits( getImageFile( sd, NameNodeFile.EDITS ));
-    File editsNew = getImageFile( sd, NameNodeFile.EDITS_NEW );
-    if( editsNew.exists() ) 
-      numEdits += editLog.loadFSEdits( editsNew );
+    numEdits = editLog.loadFSEdits(getImageFile(sd, NameNodeFile.EDITS));
+    File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
+    if (editsNew.exists()) 
+      numEdits += editLog.loadFSEdits(editsNew);
     return numEdits;
   }
 
   /**
    * Save the contents of the FS image to the file.
    */
-  void saveFSImage( File newFile  ) throws IOException {
+  void saveFSImage(File newFile ) throws IOException {
     FSDirectory fsDir = FSNamesystem.getFSNamesystem().dir;
     //
     // Write out data
@@ -691,8 +691,8 @@ class FSImage extends Storage {
       out.writeInt(FSConstants.LAYOUT_VERSION);
       out.writeInt(namespaceID);
       out.writeInt(fsDir.rootDir.numItemsInTree() - 1);
-      saveImage( "", fsDir.rootDir, out );
-      saveDatanodes( out );
+      saveImage("", fsDir.rootDir, out);
+      saveDatanodes(out);
     } finally {
       out.close();
     }
@@ -704,9 +704,9 @@ class FSImage extends Storage {
   void saveFSImage() throws IOException {
     editLog.createNewIfMissing();
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      StorageDirectory sd = getStorageDir( idx );
-      saveFSImage( getImageFile( sd, NameNodeFile.IMAGE_NEW ));
-      editLog.createEditLogFile( getImageFile( sd, NameNodeFile.EDITS ));
+      StorageDirectory sd = getStorageDir(idx);
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE_NEW));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
     }
     rollFSImage();
   }
@@ -725,27 +725,27 @@ class FSImage extends Storage {
    */
   private int newNamespaceID() {
     Random r = new Random();
-    r.setSeed( FSNamesystem.now() );
+    r.setSeed(FSNamesystem.now());
     int newID = 0;
-    while( newID == 0)
-      newID = r.nextInt( 0x7FFFFFFF );  // use 31 bits only
+    while(newID == 0)
+      newID = r.nextInt(0x7FFFFFFF);  // use 31 bits only
     return newID;
   }
 
   /** Create new dfs name directory.  Caution: this destroys all files
    * in this filesystem. */
-  void format( StorageDirectory sd ) throws IOException {
+  void format(StorageDirectory sd) throws IOException {
     sd.clearDirectory(); // create current dir
     sd.lock();
     try {
-      saveFSImage( getImageFile( sd, NameNodeFile.IMAGE ));
-      editLog.createEditLogFile( getImageFile( sd, NameNodeFile.EDITS ));
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
       sd.write();
     } finally {
       sd.unlock();
     }
-    LOG.info( "Storage directory " + sd.root 
-              + " has been successfully formatted." );
+    LOG.info("Storage directory " + sd.root 
+             + " has been successfully formatted.");
   }
 
   public void format() throws IOException {
@@ -753,9 +753,9 @@ class FSImage extends Storage {
     this.namespaceID = newNamespaceID();
     this.cTime = 0L;
     this.checkpointTime = FSNamesystem.now();
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      format( sd );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      format(sd);
     }
   }
 
@@ -764,24 +764,24 @@ class FSImage extends Storage {
    */
   private static void saveImage(String parentPrefix, 
                                 FSDirectory.INode root, 
-                                DataOutputStream out ) throws IOException {
+                                DataOutputStream out) throws IOException {
     String fullName = "";
     String fullName = "";
-    if( root.getParent() != null) {
+    if (root.getParent() != null) {
       fullName = parentPrefix + "/" + root.getLocalName();
       new UTF8(fullName).write(out);
-      out.writeShort( root.getReplication() );
-      if( root.isDir() ) {
+      out.writeShort(root.getReplication());
+      if (root.isDir()) {
         out.writeInt(0);
       } else {
         int nrBlocks = root.getBlocks().length;
-        out.writeInt( nrBlocks );
+        out.writeInt(nrBlocks);
         for (int i = 0; i < nrBlocks; i++)
           root.getBlocks()[i].write(out);
       }
     }
     for(Iterator<INode> it = root.getChildIterator(); it != null &&
-          it.hasNext(); ) {
-      saveImage( fullName, it.next(), out );
+          it.hasNext();) {
+      saveImage(fullName, it.next(), out);
     }
   }
 
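saveImage is a plain pre-order walk: each INode writes its record (full path, replication, blocks), then recurses into its children, threading the parent path down as a prefix. A toy sketch of the same traversal shape (simplified, hypothetical node type):

    import java.io.DataOutputStream;
    import java.io.IOException;
    import java.util.ArrayList;
    import java.util.List;

    class TreeSketch {
      String localName;
      List<TreeSketch> children = new ArrayList<TreeSketch>();

      // Pre-order: write this node's record, then descend. The prefix is
      // extended at each level, mirroring parentPrefix + "/" + getLocalName().
      static void save(String prefix, TreeSketch node, DataOutputStream out)
          throws IOException {
        String fullName = prefix + "/" + node.localName;
        out.writeUTF(fullName);
        for (TreeSketch child : node.children)
          save(fullName, child, out);
      }
    }
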
@@ -793,22 +793,22 @@ class FSImage extends Storage {
    * @param out output stream
    * @throws IOException
    */
-  void saveDatanodes( DataOutputStream out ) throws IOException {
+  void saveDatanodes(DataOutputStream out) throws IOException {
     Map datanodeMap = FSNamesystem.getFSNamesystem().datanodeMap;
     int size = datanodeMap.size();
-    out.writeInt( size );
-    for( Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
+    out.writeInt(size);
+    for(Iterator it = datanodeMap.values().iterator(); it.hasNext();) {
       DatanodeImage nodeImage = new DatanodeImage((DatanodeDescriptor) it.next());
-      nodeImage.write( out );
+      nodeImage.write(out);
     }
   }
 
-  void loadDatanodes( int version, DataInputStream in ) throws IOException {
-    if( version > -3 ) // pre datanode image version
+  void loadDatanodes(int version, DataInputStream in) throws IOException {
+    if (version > -3) // pre datanode image version
       return;
     FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
     int size = in.readInt();
-    for( int i = 0; i < size; i++ ) {
+    for(int i = 0; i < size; i++) {
       DatanodeImage nodeImage = new DatanodeImage();
       nodeImage.readFields(in);
       fsNamesys.unprotectedAddDatanode(nodeImage.getDatanodeDescriptor());
@@ -827,9 +827,9 @@ class FSImage extends Storage {
     if (!editLog.existsNew()) {
       throw new IOException("New Edits file does not exist");
     }
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      File ckpt = getImageFile( sd, NameNodeFile.IMAGE_NEW );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      File ckpt = getImageFile(sd, NameNodeFile.IMAGE_NEW);
       if (!ckpt.exists()) {
         throw new IOException("Checkpoint file " + ckpt +
                               " does not exist");
@@ -840,10 +840,10 @@ class FSImage extends Storage {
     //
     // Renames new image
     //
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      File ckpt = getImageFile( sd, NameNodeFile.IMAGE_NEW );
-      File curFile = getImageFile( sd, NameNodeFile.IMAGE );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      File ckpt = getImageFile(sd, NameNodeFile.IMAGE_NEW);
+      File curFile = getImageFile(sd, NameNodeFile.IMAGE);
       // renameTo fails on Windows if the destination file 
       // already exists.
       if (!ckpt.renameTo(curFile)) {
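
The comment above points at a platform quirk: File.renameTo returns false on Windows when the destination exists. The usual workaround, and presumably what the elided branch does, is to delete the destination and retry; a small sketch of that fallback (names hypothetical):

    import java.io.File;
    import java.io.IOException;

    class RenameSketch {
      static void replaceFile(File src, File dst) throws IOException {
        if (!src.renameTo(dst)) {
          dst.delete();                  // Windows: destination must not exist
          if (!src.renameTo(dst))
            throw new IOException("Cannot rename " + src + " to " + dst);
        }
      }
    }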
@@ -860,12 +860,12 @@ class FSImage extends Storage {
     //
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.checkpointTime = FSNamesystem.now();
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
       try {
         sd.write();
       } catch (IOException e) {
-        LOG.error( "Cannot write file " + sd.root, e );
+        LOG.error("Cannot write file " + sd.root, e);
         editLog.processIOError(idx);
         idx--;
       }
@@ -881,7 +881,7 @@ class FSImage extends Storage {
    * Return the name of the image file.
    */
   File getFsImageName() {
-    return getImageFile( 0, NameNodeFile.IMAGE );
+    return getImageFile(0, NameNodeFile.IMAGE);
   }
 
   /**
@@ -890,8 +890,8 @@ class FSImage extends Storage {
    */
   File[] getFsImageNameCheckpoint() {
     File[] list = new File[getNumStorageDirs()];
-    for( int i = 0; i < getNumStorageDirs(); i++ ) {
-      list[i] = getImageFile( getStorageDir( i ), NameNodeFile.IMAGE_NEW );
+    for(int i = 0; i < getNumStorageDirs(); i++) {
+      list[i] = getImageFile(getStorageDir(i), NameNodeFile.IMAGE_NEW);
     }
     return list;
   }

File diff suppressed because it is too large
+ 232 - 230
src/java/org/apache/hadoop/dfs/FSNamesystem.java


+ 7 - 7
src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java

@@ -29,19 +29,19 @@ import org.apache.hadoop.util.StringUtils;
  */
 class InconsistentFSStateException extends IOException {
 
-  public InconsistentFSStateException( File dir, String descr ) {
-    super( "Directory " + getFilePath( dir )
-           + " is in an inconsistent state: " + descr );
+  public InconsistentFSStateException(File dir, String descr) {
+    super("Directory " + getFilePath(dir)
+          + " is in an inconsistent state: " + descr);
   }
 
-  public InconsistentFSStateException( File dir, String descr, Throwable ex ) {
-    this( dir, descr + "\n" + StringUtils.stringifyException(ex) );
+  public InconsistentFSStateException(File dir, String descr, Throwable ex) {
+    this(dir, descr + "\n" + StringUtils.stringifyException(ex));
   }
   
-  private static String getFilePath( File dir ) {
+  private static String getFilePath(File dir) {
     try {
       return dir.getCanonicalPath();
-    } catch( IOException e ) {}
+    } catch(IOException e) {}
     return dir.getPath();
   }
 }

+ 8 - 8
src/java/org/apache/hadoop/dfs/IncorrectVersionException.java

@@ -27,16 +27,16 @@ import java.io.IOException;
  */
 class IncorrectVersionException extends IOException {
 
-  public IncorrectVersionException( int versionReported, String ofWhat ) {
-    this( versionReported, ofWhat, FSConstants.LAYOUT_VERSION );
+  public IncorrectVersionException(int versionReported, String ofWhat) {
+    this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION);
   }
   
-  public IncorrectVersionException( int versionReported,
-                                    String ofWhat,
-                                    int versionExpected ) {
-    super( "Unexpected version " 
-           + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
-           + versionReported + ". Expecting = " + versionExpected + "." );
+  public IncorrectVersionException(int versionReported,
+                                   String ofWhat,
+                                   int versionExpected) {
+    super("Unexpected version " 
+          + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+          + versionReported + ". Expecting = " + versionExpected + ".");
   }
 
 }

+ 18 - 18
src/java/org/apache/hadoop/dfs/JspHelper.java

@@ -34,7 +34,7 @@ public class JspHelper {
   static Configuration conf = new Configuration();
 
   static int defaultChunkSizeToView = 
-    conf.getInt("dfs.default.chunk.view.size",32 * 1024);
+    conf.getInt("dfs.default.chunk.view.size", 32 * 1024);
   static Random rand = new Random();
 
   public JspHelper() {
@@ -140,9 +140,9 @@ public class JspHelper {
     in.close();
     out.print(new String(buf));
   }
-  public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live,
-                              ArrayList<DatanodeDescriptor> dead ) {
-    if ( fsn != null )
+  public void DFSNodesStatus(ArrayList<DatanodeDescriptor> live,
+                             ArrayList<DatanodeDescriptor> dead) {
+    if (fsn != null)
       fsn.DFSNodesStatus(live, dead);
   }
   public void addTableHeader(JspWriter out) throws IOException {
@@ -161,7 +161,7 @@ public class JspHelper {
     out.print("<tr>");
     out.print("<tr>");
       
       
     for (int i = 0; i < columns.length; i++) {
     for (int i = 0; i < columns.length; i++) {
-      if( row/2*2 == row ) {//even
+      if (row/2*2 == row) {//even
         out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
         out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
       } else {
       } else {
         out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
         out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
@@ -175,7 +175,7 @@ public class JspHelper {
   }
 
   public String getSafeModeText() {
-    if( ! fsn.isInSafeMode() )
+    if (!fsn.isInSafeMode())
       return "";
       return "";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
   }
   }
@@ -197,29 +197,29 @@ public class JspHelper {
       int sortOrder = SORT_ORDER_ASC;
             
       public NodeComapare(String field, String order) {
-        if ( field.equals( "lastcontact" ) ) {
+        if (field.equals("lastcontact")) {
           sortField = FIELD_LAST_CONTACT;
-        } else if ( field.equals( "size" ) ) {
+        } else if (field.equals("size")) {
           sortField = FIELD_SIZE;
-        } else if ( field.equals( "blocks" ) ) {
+        } else if (field.equals("blocks")) {
           sortField = FIELD_BLOCKS;
-        } else if ( field.equals( "pcused" ) ) {
+        } else if (field.equals("pcused")) {
           sortField = FIELD_DISK_USED;
         } else {
           sortField = FIELD_NAME;
         }
                 
-        if ( order.equals("DSC") ) {
+        if (order.equals("DSC")) {
           sortOrder = SORT_ORDER_DSC;
         } else {
           sortOrder = SORT_ORDER_ASC;
         }
       }
 
-      public int compare( DatanodeDescriptor d1,
-                          DatanodeDescriptor d2 ) {
+      public int compare(DatanodeDescriptor d1,
+                         DatanodeDescriptor d2) {
         int ret = 0;
-        switch ( sortField ) {
+        switch (sortField) {
         case FIELD_LAST_CONTACT:
           ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
           break;
@@ -228,21 +228,21 @@ public class JspHelper {
           break;
         case FIELD_SIZE:
           long  dlong = d1.getCapacity() - d2.getCapacity();
-          ret = (dlong < 0) ? -1 : ( (dlong > 0) ? 1 : 0 );
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
           break;
         case FIELD_DISK_USED:
           double ddbl =((d2.getRemaining()*1.0/d2.getCapacity())-
                         (d1.getRemaining()*1.0/d1.getCapacity()));
-          ret = (ddbl < 0) ? -1 : ( (ddbl > 0) ? 1 : 0 );
+          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
           break;
         case FIELD_NAME: 
           ret = d1.getHostName().compareTo(d2.getHostName());
           break;
         }
-        return ( sortOrder == SORT_ORDER_DSC ) ? -ret : ret;
+        return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
       }
     }
         
-    Collections.sort( nodes, new NodeComapare( field, order ) );
+    Collections.sort(nodes, new NodeComapare(field, order));
   }
 }
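
NodeComapare folds both the sort key and the direction into one Comparator: compute an ascending result per field, then negate it when descending order was requested. A compact sketch of the same pattern (hypothetical comparator over plain longs):

    import java.util.Comparator;

    class DirectionalComparator implements Comparator<Long> {
      private final boolean descending;

      DirectionalComparator(String order) {
        this.descending = "DSC".equals(order);   // same flag convention as above
      }

      public int compare(Long a, Long b) {
        long d = a.longValue() - b.longValue();
        int ret = (d < 0) ? -1 : ((d > 0) ? 1 : 0);
        return descending ? -ret : ret;          // flip for descending
      }
    }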

+ 86 - 86
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -81,7 +81,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   }
     
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
-  public static final Log stateChangeLog = LogFactory.getLog( "org.apache.hadoop.dfs.StateChange");
+  public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.dfs.StateChange");
 
   private FSNamesystem namesystem;
   private Server server;
@@ -96,7 +96,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
   public static void format(Configuration conf) throws IOException {
-    format( conf, false );
+    format(conf, false);
   }
 
   private class NameNodeMetrics implements Updater {
@@ -208,7 +208,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   public NameNode(Configuration conf) throws IOException {
     InetSocketAddress addr = 
       DataNode.createSocketAddr(conf.get("fs.default.name"));
-    init( addr.getHostName(), addr.getPort(), conf );
+    init(addr.getHostName(), addr.getPort(), conf);
   }
 
   /**
@@ -221,7 +221,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   public NameNode(String bindAddress, int port, 
                   Configuration conf
                   ) throws IOException {
-    init( bindAddress, port, conf );
+    init(bindAddress, port, conf);
   }
 
   /**
@@ -239,7 +239,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * Stop all NameNode threads and wait for all to finish.
    */
   public void stop() {
-    if (! stopRequested) {
+    if (!stopRequested) {
       stopRequested = true;
       namesystem.close();
       emptier.interrupt();
@@ -255,7 +255,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public LocatedBlock[] open(String src) throws IOException {
     String clientMachine = Server.getRemoteAddress();
-    if ( clientMachine == null ) {
+    if (clientMachine == null) {
       clientMachine = "";
       clientMachine = "";
     }
     }
     Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
     Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
@@ -282,7 +282,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
                              long blockSize
                              ) throws IOException {
     String clientMachine = Server.getRemoteAddress();
-    if ( clientMachine == null ) {
+    if (clientMachine == null) {
       clientMachine = "";
       clientMachine = "";
     }
     }
     stateChangeLog.debug("*DIR* NameNode.create: file "
     stateChangeLog.debug("*DIR* NameNode.create: file "
@@ -303,10 +303,10 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     return new LocatedBlock(b, targets);
   }
 
-  public boolean setReplication( String src, 
-                                 short replication
-                                 ) throws IOException {
-    return namesystem.setReplication( src, replication );
+  public boolean setReplication(String src, 
+                                short replication
+                                ) throws IOException {
+    return namesystem.setReplication(src, replication);
   }
     
   /**
@@ -328,8 +328,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public void abandonBlock(Block b, String src) throws IOException {
     stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
-                         +b.getBlockName()+" of file "+src );
-    if (! namesystem.abandonBlock(b, new UTF8(src))) {
+                         +b.getBlockName()+" of file "+src);
+    if (!namesystem.abandonBlock(b, new UTF8(src))) {
       throw new IOException("Cannot abandon block during write to " + src);
       throw new IOException("Cannot abandon block during write to " + src);
     }
     }
   }
   }
@@ -337,13 +337,13 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public void abandonFileInProgress(String src, 
                                     String holder) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src );
+    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src);
     namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
   }
   /**
    */
   public boolean complete(String src, String clientName) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName );
+    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName);
     int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
     if (returnCode == STILL_WAITING) {
       return false;
@@ -375,7 +375,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public String[][] getHints(String src, long start, long len) throws IOException {
-    return namesystem.getDatanodeHints( src, start, len );
+    return namesystem.getDatanodeHints(src, start, len);
   }
     
   public long getBlockSize(String filename) throws IOException {
@@ -385,7 +385,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public boolean rename(String src, String dst) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst );
+    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
     if (!checkPathLength(dst)) {
       throw new IOException("rename: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
@@ -400,7 +400,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public boolean delete(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.delete: " + src );
+    stateChangeLog.debug("*DIR* NameNode.delete: " + src);
     return namesystem.delete(new UTF8(src));
   }
 
@@ -431,12 +431,12 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public boolean mkdirs(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src );
+    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
     if (!checkPathLength(src)) {
       throw new IOException("mkdirs: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
-    return namesystem.mkdirs( src );
+    return namesystem.mkdirs(src);
   }
 
   /** @deprecated */ @Deprecated
@@ -502,8 +502,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    * @inheritDoc
    */
-  public boolean setSafeMode( SafeModeAction action ) throws IOException {
-    switch( action ) {
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    switch(action) {
     case SAFEMODE_LEAVE: // leave safe mode
       namesystem.leaveSafeMode();
       break;
@@ -567,11 +567,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   ////////////////////////////////////////////////////////////////
   /** 
    */
-  public DatanodeRegistration register( DatanodeRegistration nodeReg,
-                                        String networkLocation
-                                        ) throws IOException {
-    verifyVersion( nodeReg.getVersion() );
-    namesystem.registerDatanode( nodeReg, networkLocation );
+  public DatanodeRegistration register(DatanodeRegistration nodeReg,
+                                       String networkLocation
+                                       ) throws IOException {
+    verifyVersion(nodeReg.getVersion());
+    namesystem.registerDatanode(nodeReg, networkLocation);
       
     return nodeReg;
   }
@@ -581,25 +581,25 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * Return a block-oriented command for the datanode to execute.
    * Return a block-oriented command for the datanode to execute.
    * This will be either a transfer or a delete operation.
    * This will be either a transfer or a delete operation.
    */
    */
-  public DatanodeCommand sendHeartbeat( DatanodeRegistration nodeReg,
-                                        long capacity, 
-                                        long remaining,
-                                        int xmitsInProgress,
-                                        int xceiverCount) throws IOException {
+  public DatanodeCommand sendHeartbeat(DatanodeRegistration nodeReg,
+                                       long capacity, 
+                                       long remaining,
+                                       int xmitsInProgress,
+                                       int xceiverCount) throws IOException {
     Object xferResults[] = new Object[2];
     xferResults[0] = xferResults[1] = null;
     Object deleteList[] = new Object[1];
     deleteList[0] = null; 
 
-    verifyRequest( nodeReg );
-    if( namesystem.gotHeartbeat( nodeReg, capacity, remaining, 
-                                 xceiverCount, 
-                                 xmitsInProgress,
-                                 xferResults,
-                                 deleteList)) {
+    verifyRequest(nodeReg);
+    if (namesystem.gotHeartbeat(nodeReg, capacity, remaining, 
+                                xceiverCount, 
+                                xmitsInProgress,
+                                xferResults,
+                                deleteList)) {
       // request block report from the datanode
       assert(xferResults[0] == null && deleteList[0] == null);
-      return new DatanodeCommand( DataNodeAction.DNA_REGISTER );
+      return new DatanodeCommand(DataNodeAction.DNA_REGISTER);
     }
     }
         
         
     //
     //
@@ -622,27 +622,27 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     return null;
   }
 
-  public DatanodeCommand blockReport( DatanodeRegistration nodeReg,
-                                      Block blocks[]) throws IOException {
-    verifyRequest( nodeReg );
+  public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
+                                     Block blocks[]) throws IOException {
+    verifyRequest(nodeReg);
     stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks" );
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks");
 
-    Block blocksToDelete[] = namesystem.processReport( nodeReg, blocks );
-    if( blocksToDelete != null && blocksToDelete.length > 0 )
-      return new BlockCommand( blocksToDelete );
-    if( getFSImage().isUpgradeFinalized() )
-      return new DatanodeCommand( DataNodeAction.DNA_FINALIZE );
+    Block blocksToDelete[] = namesystem.processReport(nodeReg, blocks);
+    if (blocksToDelete != null && blocksToDelete.length > 0)
+      return new BlockCommand(blocksToDelete);
+    if (getFSImage().isUpgradeFinalized())
+      return new DatanodeCommand(DataNodeAction.DNA_FINALIZE);
     return null;
   }
 
   public void blockReceived(DatanodeRegistration nodeReg, 
                             Block blocks[]) throws IOException {
-    verifyRequest( nodeReg );
+    verifyRequest(nodeReg);
     stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks." );
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
     for (int i = 0; i < blocks.length; i++) {
-      namesystem.blockReceived( nodeReg, blocks[i] );
+      namesystem.blockReceived(nodeReg, blocks[i]);
     }
   }
 
@@ -653,12 +653,12 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
                           String msg) throws IOException {
     // Log error message from datanode
     LOG.info("Report from " + nodeReg.getName() + ": " + msg);
-    if( errorCode == DatanodeProtocol.NOTIFY ) {
+    if (errorCode == DatanodeProtocol.NOTIFY) {
       return;
     }
-    verifyRequest( nodeReg );
-    if( errorCode == DatanodeProtocol.DISK_ERROR ) {
-      namesystem.removeDatanode( nodeReg );            
+    verifyRequest(nodeReg);
+    if (errorCode == DatanodeProtocol.DISK_ERROR) {
+      namesystem.removeDatanode(nodeReg);            
     }
   }
     
@@ -675,10 +675,10 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * @param nodeReg data node registration
    * @throws IOException
    */
-  public void verifyRequest( DatanodeRegistration nodeReg ) throws IOException {
-    verifyVersion( nodeReg.getVersion() );
-    if( ! namesystem.getRegistrationID().equals( nodeReg.getRegistrationID() ))
-      throw new UnregisteredDatanodeException( nodeReg );
+  public void verifyRequest(DatanodeRegistration nodeReg) throws IOException {
+    verifyVersion(nodeReg.getVersion());
+    if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID()))
+      throw new UnregisteredDatanodeException(nodeReg);
   }
     
   /**
@@ -687,9 +687,9 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * @param version
    * @throws IOException
    */
-  public void verifyVersion( int version ) throws IOException {
-    if( version != LAYOUT_VERSION )
-      throw new IncorrectVersionException( version, "data node" );
+  public void verifyVersion(int version) throws IOException {
+    if (version != LAYOUT_VERSION)
+      throw new IncorrectVersionException(version, "data node");
   }
 
   /**
@@ -739,22 +739,22 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded
                                 ) throws IOException {
-    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs( conf );
-    for( Iterator<File> it = dirsToFormat.iterator(); it.hasNext(); ) {
+    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    for(Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) {
       File curDir = it.next();
-      if( ! curDir.exists() )
+      if (!curDir.exists())
         continue;
-      if( isConfirmationNeeded ) {
+      if (isConfirmationNeeded) {
         System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
         if (!(System.in.read() == 'Y')) {
           System.err.println("Format aborted in "+ curDir);
           return true;
         }
-        while( System.in.read() != '\n' ); // discard the enter-key
+        while(System.in.read() != '\n'); // discard the enter-key
       }
     }
 
-    FSNamesystem nsys = new FSNamesystem(new FSImage( dirsToFormat ));
+    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat));
     nsys.dir.fsImage.format();
     return false;
   }
@@ -765,38 +765,38 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   }
 
   private static StartupOption parseArguments(String args[], 
-                                              Configuration conf ) {
+                                              Configuration conf) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
-    for( int i=0; i < argsLen; i++ ) {
+    for(int i=0; i < argsLen; i++) {
       String cmd = args[i];
-      if( "-format".equalsIgnoreCase(cmd) ) {
+      if ("-format".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.FORMAT;
-      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+      } else if ("-regular".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
-      } else if( "-upgrade".equalsIgnoreCase(cmd) ) {
+      } else if ("-upgrade".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.UPGRADE;
-      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+      } else if ("-rollback".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
       } else
         return null;
     }
-    conf.setObject( "dfs.namenode.startup", startOpt );
+    conf.setObject("dfs.namenode.startup", startOpt);
     return startOpt;
   }
 
-  static NameNode createNameNode( String argv[], 
-                                  Configuration conf ) throws IOException {
-    if( conf == null )
+  static NameNode createNameNode(String argv[], 
+                                 Configuration conf) throws IOException {
+    if (conf == null)
       conf = new Configuration();
-    StartupOption startOpt = parseArguments( argv, conf );
-    if( startOpt == null ) {
+    StartupOption startOpt = parseArguments(argv, conf);
+    if (startOpt == null) {
       printUsage();
       return null;
     }
       
-    if( startOpt == StartupOption.FORMAT ) {
-      boolean aborted = format( conf, true );
+    if (startOpt == StartupOption.FORMAT) {
+      boolean aborted = format(conf, true);
       System.exit(aborted ? 1 : 0);
     }
       
@@ -808,11 +808,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public static void main(String argv[]) throws Exception {
     try {
-      NameNode namenode = createNameNode( argv, null );
-      if( namenode != null )
+      NameNode namenode = createNameNode(argv, null);
+      if (namenode != null)
         namenode.join();
-    } catch ( Throwable e ) {
-      LOG.error( StringUtils.stringifyException( e ) );
+    } catch (Throwable e) {
+      LOG.error(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }
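
The NameNode hunks above are whitespace-only, but the startup-option plumbing they touch is worth a glance. Below is a minimal, self-contained Java sketch of the flag-to-enum parsing pattern that parseArguments() follows; the StartupOption values mirror the diff, while the demo class and main() are illustrative stand-ins, not Hadoop API.

// Sketch of the flag-to-enum parsing loop used by NameNode.parseArguments.
enum StartupOption { FORMAT, REGULAR, UPGRADE, ROLLBACK }

class StartupOptionDemo {
  static StartupOption parse(String[] args) {
    StartupOption opt = StartupOption.REGULAR;     // default when no flag given
    for (int i = 0; args != null && i < args.length; i++) {
      if ("-format".equalsIgnoreCase(args[i])) {
        opt = StartupOption.FORMAT;
      } else if ("-regular".equalsIgnoreCase(args[i])) {
        opt = StartupOption.REGULAR;
      } else if ("-upgrade".equalsIgnoreCase(args[i])) {
        opt = StartupOption.UPGRADE;
      } else if ("-rollback".equalsIgnoreCase(args[i])) {
        opt = StartupOption.ROLLBACK;
      } else {
        return null;                               // unknown flag: caller prints usage
      }
    }
    return opt;
  }

  public static void main(String[] args) {
    System.out.println(parse(new String[] {"-upgrade"}));  // prints UPGRADE
  }
}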

+ 24 - 24
src/java/org/apache/hadoop/dfs/NamenodeFsck.java

@@ -95,9 +95,9 @@ public class NamenodeFsck {
    * @throws IOException
    */
   public NamenodeFsck(Configuration conf,
-      NameNode nn,
-      Map<String,String[]> pmap,
-      HttpServletResponse response) throws IOException {
+                      NameNode nn,
+                      Map<String,String[]> pmap,
+                      HttpServletResponse response) throws IOException {
     this.conf = conf;
     this.nn = nn;
     this.out = response.getWriter();
@@ -215,13 +215,13 @@ public class NamenodeFsck {
       }
       res.corruptFiles++;
       switch(fixing) {
-        case FIXING_NONE:
-          break;
-        case FIXING_MOVE:
-          lostFoundMove(file, blocks);
-          break;
-        case FIXING_DELETE:
-          nn.delete(file.getPath());
+      case FIXING_NONE:
+        break;
+      case FIXING_MOVE:
+        lostFoundMove(file, blocks);
+        break;
+      case FIXING_DELETE:
+        nn.delete(file.getPath());
       }
     }
     if (showFiles) {
@@ -237,9 +237,9 @@ public class NamenodeFsck {
   }
   
   private void lostFoundMove(DFSFileInfo file, LocatedBlock[] blocks)
-  throws IOException {
+    throws IOException {
     DFSClient dfs = new DFSClient(DataNode.createSocketAddr(
-        conf.get("fs.default.name", "local")), conf);
+                                                            conf.get("fs.default.name", "local")), conf);
     if (!lfInited) {
       lostFoundInit(dfs);
     }
@@ -304,8 +304,8 @@ public class NamenodeFsck {
    * bad. Both places should be refactored to provide a method to copy blocks
    * around.
    */
-      private void copyBlock(DFSClient dfs, LocatedBlock lblock,
-          OutputStream fos) throws Exception {
+  private void copyBlock(DFSClient dfs, LocatedBlock lblock,
+                         OutputStream fos) throws Exception {
     int failures = 0;
     InetSocketAddress targetAddr = null;
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
@@ -398,11 +398,11 @@ public class NamenodeFsck {
    * Pick the best node from which to stream the data.
    * That's the local one, if available.
    */
-      Random r = new Random();
+  Random r = new Random();
   private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
-      TreeSet<DatanodeInfo> deadNodes) throws IOException {
+                                TreeSet<DatanodeInfo> deadNodes) throws IOException {
     if ((nodes == null) ||
-            (nodes.length - deadNodes.size() < 1)) {
+        (nodes.length - deadNodes.size() < 1)) {
       throw new IOException("No live nodes contain current block");
     }
     DatanodeInfo chosenNode = null;
@@ -433,12 +433,12 @@ public class NamenodeFsck {
         lfInitedOk = dfs.mkdirs(lfName);
         lostFound = lfName;
       } else        if (!dfs.isDirectory(lfName)) {
-          LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
-          lfInitedOk = false;
-        }  else { // exists and isDirectory
-          lostFound = lfName;
-          lfInitedOk = true;
-        }
+        LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
+        lfInitedOk = false;
+      }  else { // exists and isDirectory
+        lostFound = lfName;
+        lfInitedOk = true;
+      }
     }  catch (Exception e) {
       e.printStackTrace();
       lfInitedOk = false;
@@ -584,7 +584,7 @@ public class NamenodeFsck {
       res.append("\n Total size:\t" + totalSize + " B");
       res.append("\n Total size:\t" + totalSize + " B");
       res.append("\n Total blocks:\t" + totalBlocks);
       res.append("\n Total blocks:\t" + totalBlocks);
       if (totalBlocks > 0) res.append(" (avg. block size "
       if (totalBlocks > 0) res.append(" (avg. block size "
-          + (totalSize / totalBlocks) + " B)");
+                                      + (totalSize / totalBlocks) + " B)");
       res.append("\n Total dirs:\t" + totalDirs);
       res.append("\n Total dirs:\t" + totalDirs);
       res.append("\n Total files:\t" + totalFiles);
       res.append("\n Total files:\t" + totalFiles);
       if (missingSize > 0) {
       if (missingSize > 0) {
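
The bestNode() hunk above reformats a small selection loop: pick a random replica and retry until one is not in the dead set. A hedged sketch of that loop, with plain String node names standing in for DatanodeInfo; the ReplicaChooser class is a hypothetical illustration, not the HDFS type.

import java.util.HashSet;
import java.util.Random;
import java.util.Set;

class ReplicaChooser {
  private final Random r = new Random();

  // Retries random picks until a live node is found; terminates as long as
  // at least one entry of nodes is absent from deadNodes.
  String choose(String[] nodes, Set<String> deadNodes) {
    if (nodes == null || nodes.length - deadNodes.size() < 1) {
      throw new IllegalStateException("No live nodes contain current block");
    }
    String chosen = null;
    while (chosen == null) {
      String candidate = nodes[r.nextInt(nodes.length)];
      if (!deadNodes.contains(candidate)) {
        chosen = candidate;
      }
    }
    return chosen;
  }

  public static void main(String[] args) {
    Set<String> dead = new HashSet<String>();
    dead.add("dn2");
    System.out.println(new ReplicaChooser()
        .choose(new String[] {"dn1", "dn2", "dn3"}, dead));
  }
}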

+ 7 - 7
src/java/org/apache/hadoop/dfs/NamespaceInfo.java

@@ -41,8 +41,8 @@ class NamespaceInfo extends StorageInfo implements Writable {
     buildVersion = null;
   }
   
-  public NamespaceInfo( int nsID, long cT ) {
-    super( FSConstants.LAYOUT_VERSION, nsID, cT );
+  public NamespaceInfo(int nsID, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cT);
     buildVersion = Storage.getBuildVersion();
   }
   
@@ -60,14 +60,14 @@ class NamespaceInfo extends StorageInfo implements Writable {
   }
 
   public void write(DataOutput out) throws IOException {
-    UTF8.writeString( out, getBuildVersion() );
-    out.writeInt( getLayoutVersion() );
-    out.writeInt( getNamespaceID() );
-    out.writeLong( getCTime() );
+    UTF8.writeString(out, getBuildVersion());
+    out.writeInt(getLayoutVersion());
+    out.writeInt(getNamespaceID());
+    out.writeLong(getCTime());
   }
 
   public void readFields(DataInput in) throws IOException {
-    buildVersion = UTF8.readString( in );
+    buildVersion = UTF8.readString(in);
     layoutVersion = in.readInt();
     namespaceID = in.readInt();
     cTime = in.readLong();
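
The NamespaceInfo hunk illustrates the usual Writable discipline: readFields() must consume fields in exactly the order and types that write() produced them. A minimal sketch of that symmetry using only java.io, with writeUTF/readUTF standing in for Hadoop's UTF8 helpers; the Info class is illustrative.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

class Info {
  String buildVersion;
  int layoutVersion;
  int namespaceID;
  long cTime;

  void write(DataOutput out) throws IOException {
    out.writeUTF(buildVersion);    // NamespaceInfo uses UTF8.writeString here
    out.writeInt(layoutVersion);
    out.writeInt(namespaceID);
    out.writeLong(cTime);
  }

  void readFields(DataInput in) throws IOException {
    buildVersion = in.readUTF();   // must mirror write() field for field
    layoutVersion = in.readInt();
    namespaceID = in.readInt();
    cTime = in.readLong();
  }
}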

+ 2 - 2
src/java/org/apache/hadoop/dfs/SafeModeException.java

@@ -10,8 +10,8 @@ import java.io.IOException;
  */
 public class SafeModeException extends IOException {
 
-  public SafeModeException( String text, FSNamesystem.SafeModeInfo mode  ) {
-    super( text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
+  public SafeModeException(String text, FSNamesystem.SafeModeInfo mode ) {
+    super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
   }
 
 }

+ 3 - 3
src/java/org/apache/hadoop/dfs/SecondaryNameNode.java

@@ -426,9 +426,9 @@ public class SecondaryNameNode implements FSConstants, Runnable {
    */
   public static class GetImageServlet extends HttpServlet {
     @SuppressWarnings("unchecked")
-      public void doGet(HttpServletRequest request,
-                        HttpServletResponse response
-                        ) throws ServletException, IOException {
+    public void doGet(HttpServletRequest request,
+                      HttpServletResponse response
+                      ) throws ServletException, IOException {
       Map<String,String[]> pmap = request.getParameterMap();
       try {
         ServletContext context = getServletContext();

+ 159 - 159
src/java/org/apache/hadoop/dfs/Storage.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.util.VersionInfo;
 /**
  * Common class for storage information.
  * 
- * TODO namespaceID should be long and computed as hash( address + port )
+ * TODO namespaceID should be long and computed as hash(address + port)
  * @author Konstantin Shvachko
  */
 class StorageInfo {
@@ -47,16 +47,16 @@ class StorageInfo {
   long  cTime;          // creation timestamp
   
   StorageInfo () {
-    this( 0, 0, 0L );
+    this(0, 0, 0L);
   }
   
-  StorageInfo( int layoutV, int nsID, long cT ) {
+  StorageInfo(int layoutV, int nsID, long cT) {
     layoutVersion = layoutV;
     namespaceID = nsID;
     cTime = cT;
   }
   
-  StorageInfo( StorageInfo from ) {
+  StorageInfo(StorageInfo from) {
     layoutVersion = from.layoutVersion;
     namespaceID = from.namespaceID;
     cTime = from.cTime;
@@ -124,7 +124,7 @@ abstract class Storage extends StorageInfo {
     File              root; // root directory
     FileLock          lock; // storage lock
     
-    StorageDirectory( File dir ) {
+    StorageDirectory(File dir) {
       this.root = dir;
       this.lock = null;
     }
@@ -135,17 +135,17 @@ abstract class Storage extends StorageInfo {
      * @throws IOException if file cannot be read or contains inconsistent data
      */
     void read() throws IOException {
-      read( getVersionFile() );
+      read(getVersionFile());
     }
     
-    void read( File from ) throws IOException {
-      RandomAccessFile file = new RandomAccessFile( from, "rws" );
+    void read(File from) throws IOException {
+      RandomAccessFile file = new RandomAccessFile(from, "rws");
       try {
-        FileInputStream in = new FileInputStream( file.getFD() );
+        FileInputStream in = new FileInputStream(file.getFD());
         file.seek(0);
         Properties props = new Properties();
-        props.load( in );
-        getFields( props, this );
+        props.load(in);
+        getFields(props, this);
       } finally {
         file.close();
       }
@@ -157,17 +157,17 @@ abstract class Storage extends StorageInfo {
      * @throws IOException
      */
     void write() throws IOException {
-      write( getVersionFile() );
+      write(getVersionFile());
     }
 
-    void write( File to ) throws IOException {
+    void write(File to) throws IOException {
       Properties props = new Properties();
-      setFields( props, this );
-      RandomAccessFile file = new RandomAccessFile( to, "rws" );
+      setFields(props, this);
+      RandomAccessFile file = new RandomAccessFile(to, "rws");
       try {
         file.seek(0);
-        FileOutputStream out = new FileOutputStream( file.getFD() );
-        props.store( out, null );
+        FileOutputStream out = new FileOutputStream(file.getFD());
+        props.store(out, null);
       } finally {
         file.close();
       }
@@ -188,33 +188,33 @@ abstract class Storage extends StorageInfo {
      */
     void clearDirectory() throws IOException {
       File curDir = this.getCurrentDir();
-      if( curDir.exists() )
-        if( ! (FileUtil.fullyDelete( curDir )) )
-          throw new IOException("Cannot remove current directory: " + curDir );
-      if( ! curDir.mkdirs() )
-        throw new IOException( "Cannot create directory " + curDir );
+      if (curDir.exists())
+        if (!(FileUtil.fullyDelete(curDir)))
+          throw new IOException("Cannot remove current directory: " + curDir);
+      if (!curDir.mkdirs())
+        throw new IOException("Cannot create directory " + curDir);
     }
 
     File getCurrentDir() {
-      return new File( root, STORAGE_DIR_CURRENT );
+      return new File(root, STORAGE_DIR_CURRENT);
     }
     File getVersionFile() {
-      return new File( new File( root, STORAGE_DIR_CURRENT ), STORAGE_FILE_VERSION );
+      return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION);
     }
     File getPreviousVersionFile() {
-      return new File( new File( root, STORAGE_DIR_PREVIOUS ), STORAGE_FILE_VERSION );
+      return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION);
     }
     File getPreviousDir() {
-      return new File( root, STORAGE_DIR_PREVIOUS );
+      return new File(root, STORAGE_DIR_PREVIOUS);
     }
     File getPreviousTmp() {
-      return new File( root, STORAGE_TMP_PREVIOUS );
+      return new File(root, STORAGE_TMP_PREVIOUS);
    }
     File getRemovedTmp() {
-      return new File( root, STORAGE_TMP_REMOVED );
+      return new File(root, STORAGE_TMP_REMOVED);
     }
     File getFinalizedTmp() {
-      return new File( root, STORAGE_TMP_FINALIZED );
+      return new File(root, STORAGE_TMP_FINALIZED);
     }
 
     /**
@@ -226,40 +226,40 @@ abstract class Storage extends StorageInfo {
      * @throws {@link InconsistentFSStateException} if directory state is not 
      * consistent and cannot be recovered 
      */
-    StorageState analyzeStorage( StartupOption startOpt ) throws IOException {
+    StorageState analyzeStorage(StartupOption startOpt) throws IOException {
       assert root != null : "root is null";
       String rootPath = root.getCanonicalPath();
       try { // check that storage exists
-        if( ! root.exists() ) {
+        if (!root.exists()) {
           // storage directory does not exist
-          if( startOpt != StartupOption.FORMAT ) {
-            LOG.info( "Storage directory " + rootPath + " does not exist." );
+          if (startOpt != StartupOption.FORMAT) {
+            LOG.info("Storage directory " + rootPath + " does not exist.");
             return StorageState.NON_EXISTENT;
           }
-          LOG.info( rootPath + " does not exist. Creating ..." );
-          if( ! root.mkdirs() )
-            throw new IOException( "Cannot create directory " + rootPath );
+          LOG.info(rootPath + " does not exist. Creating ...");
+          if (!root.mkdirs())
+            throw new IOException("Cannot create directory " + rootPath);
         }
         // or is inaccessible
-        if( ! root.isDirectory() ) {
-          LOG.info( rootPath + "is not a directory." );
+        if (!root.isDirectory()) {
+          LOG.info(rootPath + "is not a directory.");
           return StorageState.NON_EXISTENT;
         }
-        if( ! root.canWrite() ) {
-          LOG.info( "Cannot access storage directory " + rootPath );
+        if (!root.canWrite()) {
+          LOG.info("Cannot access storage directory " + rootPath);
           return StorageState.NON_EXISTENT;
         }
-      } catch( SecurityException ex ) {
-        LOG.info( "Cannot access storage directory " + rootPath, ex );
+      } catch(SecurityException ex) {
+        LOG.info("Cannot access storage directory " + rootPath, ex);
         return StorageState.NON_EXISTENT;
       }
 
       this.lock(); // lock storage if it exists
 
-      if( startOpt == StartupOption.FORMAT )
+      if (startOpt == StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
       // check whether a conversion is required
-      if( isConversionNeeded( this ) )
+      if (isConversionNeeded(this))
         return StorageState.CONVERT;
       // check whether current directory is valid
       File versionFile = getVersionFile();
@@ -271,48 +271,48 @@ abstract class Storage extends StorageInfo {
       boolean hasRemovedTmp = getRemovedTmp().exists();
       boolean hasFinalizedTmp = getFinalizedTmp().exists();
 
-      if( !(hasPreviousTmp || hasRemovedTmp || hasFinalizedTmp) ) {
+      if (!(hasPreviousTmp || hasRemovedTmp || hasFinalizedTmp)) {
         // no temp dirs - no recovery
-        if( hasCurrent )
+        if (hasCurrent)
           return StorageState.NORMAL;
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-                      "version file in current directory it is missing." );
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 "version file in current directory it is missing.");
         return StorageState.NOT_FORMATTED;
       }
 
-      if( (hasPreviousTmp?1:0)+(hasRemovedTmp?1:0)+(hasFinalizedTmp?1:0) > 1 )
+      if ((hasPreviousTmp?1:0)+(hasRemovedTmp?1:0)+(hasFinalizedTmp?1:0) > 1)
         // more than one temp dirs
-        throw new InconsistentFSStateException( root,
-                    "too many temporary directories." );
+        throw new InconsistentFSStateException(root,
+                                               "too many temporary directories.");
 
       // # of temp dirs == 1 should either recover or complete a transition
-      if( hasFinalizedTmp ) {
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-              STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
-              + "cannot exist together." );
+      if (hasFinalizedTmp) {
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
+                                                 + "cannot exist together.");
         return StorageState.COMPLETE_FINALIZE;
       }
 
-      if( hasPreviousTmp ) {
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-              STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
-              + " cannot exist together." );
-        if( hasCurrent )
+      if (hasPreviousTmp) {
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
+                                                 + " cannot exist together.");
+        if (hasCurrent)
           return StorageState.COMPLETE_UPGRADE;
         return StorageState.RECOVER_UPGRADE;
       }
       
       assert hasRemovedTmp : "hasRemovedTmp must be true";
-      if( !(hasCurrent ^ hasPrevious) )
-        throw new InconsistentFSStateException( root,
-            "one and only one directory " + STORAGE_DIR_CURRENT 
-            + " or " + STORAGE_DIR_PREVIOUS 
-            + " must be present when " + STORAGE_TMP_REMOVED
-            + " exists." );
-      if( hasCurrent )
+      if (!(hasCurrent ^ hasPrevious))
+        throw new InconsistentFSStateException(root,
+                                               "one and only one directory " + STORAGE_DIR_CURRENT 
+                                               + " or " + STORAGE_DIR_PREVIOUS 
+                                               + " must be present when " + STORAGE_TMP_REMOVED
+                                               + " exists.");
+      if (hasCurrent)
         return StorageState.COMPLETE_ROLLBACK;
       return StorageState.RECOVER_ROLLBACK;
     }
@@ -323,39 +323,39 @@ abstract class Storage extends StorageInfo {
      * @param curState specifies what/how the state should be recovered
      * @throws IOException
      */
-    void doRecover( StorageState curState ) throws IOException {
+    void doRecover(StorageState curState) throws IOException {
       File curDir = getCurrentDir();
       String rootPath = root.getCanonicalPath();
-      switch( curState ) {
-        case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
-          LOG.info( "Completing previous upgrade for storage directory " 
-                    + rootPath + "." );
-          rename( getPreviousTmp(), getPreviousDir() );
-          return;
-        case RECOVER_UPGRADE:   // mv previous.tmp -> current
-          LOG.info( "Recovering storage directory " + rootPath
-                    + " from previous upgrade." );
-          if( curDir.exists() )
-            deleteDir( curDir );
-          rename( getPreviousTmp(), curDir );
-          return;
-        case COMPLETE_ROLLBACK: // rm removed.tmp
-          LOG.info( "Completing previous rollback for storage directory "
-                    + rootPath + "." );
-          deleteDir( getRemovedTmp() );
-          return;
-        case RECOVER_ROLLBACK:  // mv removed.tmp -> current
-          LOG.info( "Recovering storage directory " + rootPath
-                    + " from previous rollback." );
-          rename( getRemovedTmp(), curDir );
-          return;
-        case COMPLETE_FINALIZE: // rm finalized.tmp
-          LOG.info( "Completing previous finalize for storage directory "
-                    + rootPath + "." );
-          deleteDir( getFinalizedTmp() );
-          return;
-        default:
-          throw new IOException( "Unexpected FS state: " + curState );
+      switch(curState) {
+      case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
+        LOG.info("Completing previous upgrade for storage directory " 
+                 + rootPath + ".");
+        rename(getPreviousTmp(), getPreviousDir());
+        return;
+      case RECOVER_UPGRADE:   // mv previous.tmp -> current
+        LOG.info("Recovering storage directory " + rootPath
+                 + " from previous upgrade.");
+        if (curDir.exists())
+          deleteDir(curDir);
+        rename(getPreviousTmp(), curDir);
+        return;
+      case COMPLETE_ROLLBACK: // rm removed.tmp
+        LOG.info("Completing previous rollback for storage directory "
+                 + rootPath + ".");
+        deleteDir(getRemovedTmp());
+        return;
+      case RECOVER_ROLLBACK:  // mv removed.tmp -> current
+        LOG.info("Recovering storage directory " + rootPath
+                 + " from previous rollback.");
+        rename(getRemovedTmp(), curDir);
+        return;
+      case COMPLETE_FINALIZE: // rm finalized.tmp
+        LOG.info("Completing previous finalize for storage directory "
+                 + rootPath + ".");
+        deleteDir(getFinalizedTmp());
+        return;
+      default:
+        throw new IOException("Unexpected FS state: " + curState);
       }
     }
 
@@ -365,22 +365,22 @@ abstract class Storage extends StorageInfo {
      * @throws IOException if locking fails
      */
     void lock() throws IOException {
-      File lockF = new File( root, STORAGE_FILE_LOCK );
+      File lockF = new File(root, STORAGE_FILE_LOCK);
       lockF.deleteOnExit();
-      RandomAccessFile file = new RandomAccessFile( lockF, "rws" );
+      RandomAccessFile file = new RandomAccessFile(lockF, "rws");
       try {
         this.lock = file.getChannel().tryLock();
-      } catch( IOException e ) {
-        LOG.info( StringUtils.stringifyException(e) );
+      } catch(IOException e) {
+        LOG.info(StringUtils.stringifyException(e));
         file.close();
         throw e;
       }
-      if( lock == null ) {
+      if (lock == null) {
         String msg = "Cannot lock storage " + this.root 
-                      + ". The directory is already locked.";
-        LOG.info( msg );
+          + ". The directory is already locked.";
+        LOG.info(msg);
         file.close();
-        throw new IOException( msg );
+        throw new IOException(msg);
       }
     }
 
@@ -390,7 +390,7 @@ abstract class Storage extends StorageInfo {
      * @throws IOException
      */
     void unlock() throws IOException {
-      if( this.lock == null )
+      if (this.lock == null)
         return;
       this.lock.release();
       lock.channel().close();
@@ -400,18 +400,18 @@ abstract class Storage extends StorageInfo {
   /**
    * Create empty storage info of the specified type
    */
-  Storage( NodeType type ) {
+  Storage(NodeType type) {
     super();
     this.storageType = type;
   }
   
-  Storage( NodeType type, int nsID, long cT ) {
-    super( FSConstants.LAYOUT_VERSION, nsID, cT );
+  Storage(NodeType type, int nsID, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cT);
     this.storageType = type;
   }
   
-  Storage( NodeType type, StorageInfo storageInfo ) {
-    super( storageInfo );
+  Storage(NodeType type, StorageInfo storageInfo) {
+    super(storageInfo);
     this.storageType = type;
   }
   
@@ -419,15 +419,15 @@ abstract class Storage extends StorageInfo {
     return storageDirs.size();
   }
   
-  StorageDirectory getStorageDir( int idx ) {
-    return storageDirs.get( idx );
+  StorageDirectory getStorageDir(int idx) {
+    return storageDirs.get(idx);
   }
   
-  protected void addStorageDir( StorageDirectory sd ) {
-    storageDirs.add( sd );
+  protected void addStorageDir(StorageDirectory sd) {
+    storageDirs.add(sd);
   }
   
-  abstract boolean isConversionNeeded( StorageDirectory sd ) throws IOException;
+  abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
   
   /**
    * Get common storage fields.
@@ -436,28 +436,28 @@ abstract class Storage extends StorageInfo {
    * @param props
    * @throws IOException
    */
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                          ) throws IOException {
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
     String sv, st, sid, sct;
-    sv = props.getProperty( "layoutVersion" );
-    st = props.getProperty( "storageType" );
-    sid = props.getProperty( "namespaceID" );
-    sct = props.getProperty( "cTime" );
-    if( sv == null || st == null || sid == null || sct == null )
-      throw new InconsistentFSStateException( sd.root,
-                    "file " + STORAGE_FILE_VERSION + " is invalid." );
-    int rv = Integer.parseInt( sv );
-    NodeType rt = NodeType.valueOf( st );
-    int rid = Integer.parseInt( sid );
-    long rct = Long.parseLong( sct );
-    if( ! storageType.equals( rt ) ||
-        ! (( namespaceID == 0 ) || ( rid == 0 ) || namespaceID == rid ))
-      throw new InconsistentFSStateException( sd.root,
-                  "is incompatible with others." );
-    if( rv < FSConstants.LAYOUT_VERSION ) // future version
-        throw new IncorrectVersionException(rv, "storage directory " 
-                                            + sd.root.getCanonicalPath() );
+    sv = props.getProperty("layoutVersion");
+    st = props.getProperty("storageType");
+    sid = props.getProperty("namespaceID");
+    sct = props.getProperty("cTime");
+    if (sv == null || st == null || sid == null || sct == null)
+      throw new InconsistentFSStateException(sd.root,
+                                             "file " + STORAGE_FILE_VERSION + " is invalid.");
+    int rv = Integer.parseInt(sv);
+    NodeType rt = NodeType.valueOf(st);
+    int rid = Integer.parseInt(sid);
+    long rct = Long.parseLong(sct);
+    if (!storageType.equals(rt) ||
+        !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
+      throw new InconsistentFSStateException(sd.root,
+                                             "is incompatible with others.");
+    if (rv < FSConstants.LAYOUT_VERSION) // future version
+      throw new IncorrectVersionException(rv, "storage directory " 
+                                          + sd.root.getCanonicalPath());
     layoutVersion = rv;
     storageType = rt;
     namespaceID = rid;
@@ -471,24 +471,24 @@ abstract class Storage extends StorageInfo {
    * @param props
    * @throws IOException
    */
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                          ) throws IOException {
-    props.setProperty( "layoutVersion", String.valueOf( layoutVersion ));
-    props.setProperty( "storageType", storageType.toString() );
-    props.setProperty( "namespaceID", String.valueOf( namespaceID ));
-    props.setProperty( "cTime", String.valueOf( cTime ));
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    props.setProperty("layoutVersion", String.valueOf(layoutVersion));
+    props.setProperty("storageType", storageType.toString());
+    props.setProperty("namespaceID", String.valueOf(namespaceID));
+    props.setProperty("cTime", String.valueOf(cTime));
   }
 
-  static void rename( File from, File to ) throws IOException {
-    if( ! from.renameTo( to ))
-      throw new IOException( "Failed to rename " 
-          + from.getCanonicalPath() + " to " + to.getCanonicalPath() );
+  static void rename(File from, File to) throws IOException {
+    if (!from.renameTo(to))
+      throw new IOException("Failed to rename " 
+                            + from.getCanonicalPath() + " to " + to.getCanonicalPath());
   }
 
-  static void deleteDir( File dir ) throws IOException {
-    if( ! FileUtil.fullyDelete( dir ) )
-      throw new IOException( "Failed to delete " + dir.getCanonicalPath() );
+  static void deleteDir(File dir) throws IOException {
+    if (!FileUtil.fullyDelete(dir))
+      throw new IOException("Failed to delete " + dir.getCanonicalPath());
   }
   
   /**
@@ -516,9 +516,9 @@ abstract class Storage extends StorageInfo {
     return VersionInfo.getRevision();
   }
 
-  static String getRegistrationID( StorageInfo storage ) {
-    return "NS-" + Integer.toString( storage.getNamespaceID() )
-           + "-" + Integer.toString( storage.getLayoutVersion() )
-           + "-" + Long.toString( storage.getCTime() );
+  static String getRegistrationID(StorageInfo storage) {
+    return "NS-" + Integer.toString(storage.getNamespaceID())
+      + "-" + Integer.toString(storage.getLayoutVersion())
+      + "-" + Long.toString(storage.getCTime());
   }
 }
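
Much of the Storage.java churn above surrounds one idea: each storage directory keeps its metadata as java.util.Properties in a small VERSION file, written with store() and validated field by field on load. A simplified sketch of that round trip; it uses plain file streams where the real code holds a RandomAccessFile opened "rws", and the file name and field set here are assumptions for illustration.

import java.io.*;
import java.util.Properties;

class VersionFileDemo {
  static void write(File f, int layoutVersion, int namespaceID) throws IOException {
    Properties props = new Properties();
    props.setProperty("layoutVersion", String.valueOf(layoutVersion));
    props.setProperty("namespaceID", String.valueOf(namespaceID));
    FileOutputStream out = new FileOutputStream(f);
    try {
      props.store(out, null);
    } finally {
      out.close();
    }
  }

  static int[] read(File f) throws IOException {
    Properties props = new Properties();
    FileInputStream in = new FileInputStream(f);
    try {
      props.load(in);
    } finally {
      in.close();
    }
    String lv = props.getProperty("layoutVersion");
    String id = props.getProperty("namespaceID");
    if (lv == null || id == null)
      throw new IOException("file " + f + " is invalid.");   // cf. getFields()
    return new int[] {Integer.parseInt(lv), Integer.parseInt(id)};
  }

  public static void main(String[] args) throws IOException {
    File f = File.createTempFile("VERSION", null);
    write(f, -4, 42);
    int[] fields = read(f);
    System.out.println(fields[0] + " " + fields[1]);         // prints "-4 42"
  }
}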

+ 4 - 4
src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java

@@ -11,12 +11,12 @@ import java.io.IOException;
  */
 class UnregisteredDatanodeException extends IOException {
 
-  public UnregisteredDatanodeException( DatanodeID nodeID ) {
-    super("Unregistered data node: " + nodeID.getName() );
+  public UnregisteredDatanodeException(DatanodeID nodeID) {
+    super("Unregistered data node: " + nodeID.getName());
   }
 
-  public UnregisteredDatanodeException( DatanodeID nodeID, 
-                                        DatanodeInfo storedNode ) {
+  public UnregisteredDatanodeException(DatanodeID nodeID, 
+                                       DatanodeInfo storedNode) {
     super("Data node " + nodeID.getName() 
           + " is attempting to report storage ID "
           + nodeID.getStorageID() + ". Node " 

+ 4 - 4
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -288,9 +288,9 @@ public class DistributedCache {
     byte[] digest = null;
 
     FileSystem fileSystem = getFileSystem(cache, conf);
-    if(!(fileSystem instanceof ChecksumFileSystem)) {
-      throw new IOException( "Not a checksummed file system: "
-                             +fileSystem.getUri() );
+    if (!(fileSystem instanceof ChecksumFileSystem)) {
+      throw new IOException("Not a checksummed file system: "
+                            +fileSystem.getUri());
     }
     String filename = cache.getPath();
     Path filePath = new Path(filename);
@@ -304,7 +304,7 @@ public class DistributedCache {
     }
     if (!fileSystem.exists(md5File)) {
       ChecksumFileSystem checksumFs;
-      if(!(fileSystem instanceof ChecksumFileSystem)) {
+      if (!(fileSystem instanceof ChecksumFileSystem)) {
         throw new IOException(
                               "Not a checksumed file system: "+fileSystem.getUri());
       } else {
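
Both DistributedCache hunks tighten the same guard: fail fast unless the FileSystem is really a ChecksumFileSystem before any checksum-specific call. The shape of that instanceof-then-downcast check, with hypothetical stand-in classes rather than the Hadoop hierarchy:

class BaseFs {}

class ChecksummedFs extends BaseFs {
  String checksumFileFor(String path) { return path + ".crc"; }
}

class GuardDemo {
  static String checksumFile(BaseFs fs, String path) throws Exception {
    if (!(fs instanceof ChecksummedFs)) {
      throw new Exception("Not a checksummed file system");  // fail fast
    }
    return ((ChecksummedFs) fs).checksumFileFor(path);       // safe downcast
  }

  public static void main(String[] args) throws Exception {
    System.out.println(checksumFile(new ChecksummedFs(), "/data/part-0"));
  }
}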

+ 22 - 22
src/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -112,7 +112,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
       this.file = file;
       Path sumFile = fs.getChecksumFile(file);
       try {
-        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(),bufferSize);
+        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(), bufferSize);
         sums = fs.getRawFileSystem().open(sumFile, sumBufferSize);
 
         byte[] version = new byte[CHECKSUM_VERSION.length];
@@ -133,14 +133,14 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     public void seek(long desired) throws IOException {
       // seek to a checksum boundary
       long checksumBoundary = desired/bytesPerSum*bytesPerSum;
-      if(checksumBoundary != getPos()) {
+      if (checksumBoundary != getPos()) {
         datas.seek(checksumBoundary);
-        if(sums != null) {
+        if (sums != null) {
           sums.seek(HEADER_LENGTH + 4*(checksumBoundary/bytesPerSum));
         }
       }
       
-      if(sums != null) {
+      if (sums != null) {
         sum.reset();
         inSum = 0;
       }
@@ -207,9 +207,9 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
               summed += toSum;
               
               inSum += toSum;
-              if (inSum == bytesPerSum ) {
+              if (inSum == bytesPerSum) {
                 verifySum(read-(summed-bytesPerSum));
-              } else if( read == summed && endOfFile ) {
+              } else if (read == summed && endOfFile) {
                 verifySum(read-read/bytesPerSum*bytesPerSum);
               }
             }
@@ -314,7 +314,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     }
 
     @Override
-      public boolean seekToNewSource(long targetPos) throws IOException {
+    public boolean seekToNewSource(long targetPos) throws IOException {
       return datas.seekToNewSource(targetPos) ||
         sums.seekToNewSource(targetPos/bytesPerSum);
     }
@@ -327,7 +327,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @param bufferSize the size of the buffer to be used.
    */
   @Override
-    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     if (!exists(f)) {
       throw new FileNotFoundException(f.toString());
     }
@@ -405,7 +405,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     
     public void close() throws IOException {
       writeSum();
-      if(sums != null) {
+      if (sums != null) {
         sums.close();
       }
       out.close();
@@ -429,8 +429,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @param replication required block replication for the file. 
    */
   @Override
-    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-                                     short replication, long blockSize, Progressable progress)
+  public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+                                   short replication, long blockSize, Progressable progress)
     throws IOException {
     if (exists(f) && !overwrite) {
       throw new IOException("File already exists:" + f);
@@ -497,7 +497,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
       return fs.delete(f);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.delete(checkFile);
       }
 
@@ -518,7 +518,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @exception IOException
    */
   @Override
-    public Path[] listPaths(Path[] files) throws IOException {
+  public Path[] listPaths(Path[] files) throws IOException {
     return fs.listPaths(files, DEFAULT_FILTER);
   }
 
@@ -533,17 +533,17 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public boolean mkdirs(Path f) throws IOException {
+  public boolean mkdirs(Path f) throws IOException {
     return fs.mkdirs(f);
   }
 
   @Override
-    public void lock(Path f, boolean shared) throws IOException {
+  public void lock(Path f, boolean shared) throws IOException {
     if (fs.isDirectory(f)) {
       fs.lock(f, shared);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.lock(checkFile, shared);
       }
       fs.lock(f, shared);
@@ -551,12 +551,12 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public void release(Path f) throws IOException {
+  public void release(Path f) throws IOException {
     if (fs.isDirectory(f)) {
       fs.release(f);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.release(getChecksumFile(f));
       }
       fs.release(f);
@@ -564,7 +564,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileSystem localFs = getNamed("file:///", getConf());
     FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
@@ -575,7 +575,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * Copy it from FS control to the local dst name.
    */
   @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileSystem localFs = getNamed("file:///", getConf());
     FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
@@ -615,13 +615,13 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     return tmpLocalFile;
   }
 
   @Override
-    public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     moveFromLocalFile(tmpLocalFile, fsOutputFile);
   }
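
The seek() hunk in ChecksumFileSystem rounds the requested position down to a checksum-chunk boundary, then derives the matching offset in the sums file: one 4-byte CRC per bytesPerSum bytes of data, after a fixed header. A worked sketch of that arithmetic; HEADER_LENGTH = 8 is an assumed value for illustration only.

class ChecksumOffsets {
  static final int HEADER_LENGTH = 8;            // assumption, not the real constant

  static long dataBoundary(long desired, int bytesPerSum) {
    return desired / bytesPerSum * bytesPerSum;  // round down to chunk start
  }

  static long sumFileOffset(long boundary, int bytesPerSum) {
    return HEADER_LENGTH + 4 * (boundary / bytesPerSum);  // 4-byte CRC per chunk
  }

  public static void main(String[] args) {
    int bytesPerSum = 512;
    long boundary = dataBoundary(1300, bytesPerSum);
    System.out.println(boundary + " "                      // prints "1024 16"
        + sumFileOffset(boundary, bytesPerSum));
  }
}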

+ 7 - 7
src/java/org/apache/hadoop/fs/DF.java

@@ -42,19 +42,19 @@ public class DF {
   private int percentUsed;
   private String mount;
   
-  public DF(File path, Configuration conf ) throws IOException {
-    this( path, conf.getLong( "dfs.df.interval", DF.DF_INTERVAL_DEFAULT ));
+  public DF(File path, Configuration conf) throws IOException {
+    this(path, conf.getLong("dfs.df.interval", DF.DF_INTERVAL_DEFAULT));
   }
 
   public DF(File path, long dfInterval) throws IOException {
     this.dirPath = path.getCanonicalPath();
     this.dfInterval = dfInterval;
-    lastDF = ( dfInterval < 0 ) ? 0 : -dfInterval;
+    lastDF = (dfInterval < 0) ? 0 : -dfInterval;
     this.doDF();
   }
   
   private void doDF() throws IOException { 
-    if( lastDF + dfInterval > System.currentTimeMillis() )
+    if (lastDF + dfInterval > System.currentTimeMillis())
       return;
     Process process;
     process = Runtime.getRuntime().exec(getExecString());
@@ -138,10 +138,10 @@ public class DF {
   }
   }
 
 
   private String[] getExecString() {
   private String[] getExecString() {
-    return new String[] {"df","-k",dirPath};
+    return new String[] {"df","-k", dirPath};
   }
   }
   
   
-  private void parseExecResult( BufferedReader lines ) throws IOException {
+  private void parseExecResult(BufferedReader lines) throws IOException {
     lines.readLine();                         // skip headings
     lines.readLine();                         // skip headings
   
   
     StringTokenizer tokens =
     StringTokenizer tokens =
@@ -161,7 +161,7 @@ public class DF {
 
 
   public static void main(String[] args) throws Exception {
   public static void main(String[] args) throws Exception {
     String path = ".";
     String path = ".";
-    if( args.length > 0 )
+    if (args.length > 0)
       path = args[0];
       path = args[0];
 
 
     System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());
     System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());

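(Aside, not part of the patch: DF shells out to the Unix "df -k" command and re-runs it at most once per dfInterval milliseconds, as the lastDF check in doDF() above shows. A small usage sketch mirroring DF.main(); "/tmp" is just an example path.)

import java.io.File;
import org.apache.hadoop.fs.DF;

public class DFExample {
  public static void main(String[] args) throws Exception {
    // A negative interval makes lastDF = 0, forcing a refresh on construction.
    DF df = new DF(new File("/tmp"), DF.DF_INTERVAL_DEFAULT);
    System.out.println(df.toString());  // same report DF.main() prints above
  }
}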
+ 9 - 9
src/java/org/apache/hadoop/fs/FSDataInputStream.java

@@ -38,7 +38,7 @@ public class FSDataInputStream extends DataInputStream
    // calls to it in order to cache the position.
    public int read(byte b[], int off, int len) throws IOException {
      int result;
-      if( (result = in.read(b, off, len)) > 0 )
+      if ((result = in.read(b, off, len)) > 0)
        position += result;
      return result;
    }
@@ -53,12 +53,12 @@ public class FSDataInputStream extends DataInputStream
    }
    
    public int read(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
      return ((FSInputStream)in).read(position, buffer, offset, length);
    }
    
    public void readFully(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
      ((FSInputStream)in).readFully(position, buffer, offset, length);
    }
  }
@@ -95,12 +95,12 @@ public class FSDataInputStream extends DataInputStream
    }
 
    public int read(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
      return ((PositionCache)in).read(position, buffer, offset, length);
    }
    
    public void readFully(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
      ((PositionCache)in).readFully(position, buffer, offset, length);
    }
  }
@@ -113,7 +113,7 @@ public class FSDataInputStream extends DataInputStream
  
  public FSDataInputStream(FSInputStream in, int bufferSize)
    throws IOException {
-    super( new Buffer(new PositionCache(in), bufferSize) );
+    super(new Buffer(new PositionCache(in), bufferSize));
    this.inStream = in;
  }
  
@@ -126,17 +126,17 @@ public class FSDataInputStream extends DataInputStream
  }
  
  public int read(long position, byte[] buffer, int offset, int length)
-  throws IOException {
+    throws IOException {
    return ((Buffer)in).read(position, buffer, offset, length);
  }
  
  public void readFully(long position, byte[] buffer, int offset, int length)
-  throws IOException {
+    throws IOException {
    ((Buffer)in).readFully(position, buffer, offset, length);
  }
  
  public void readFully(long position, byte[] buffer)
-  throws IOException {
+    throws IOException {
    ((Buffer)in).readFully(position, buffer, 0, buffer.length);
  }
  

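(Aside, not part of the patch: these hunks only re-indent the throws clauses. The API being formatted is the positioned-read contract, which takes an explicit file offset and leaves the stream's cached position untouched. A hedged usage sketch; the path and offsets are invented.)

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class PositionedReadExample {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataInputStream in = fs.open(new Path("/data/file"));
    byte[] buf = new byte[4096];
    int n = in.read(1024L, buf, 0, buf.length);  // read at offset 1024, no seek
    in.readFully(0L, buf, 0, 16);                // exactly 16 bytes at offset 0
    in.close();
  }
}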
+ 2 - 2
src/java/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -74,12 +74,12 @@ public class FSDataOutputStream extends DataOutputStream {
  }
 
  public FSDataOutputStream(OutputStream out, int bufferSize)
-  throws IOException {
+    throws IOException {
    super(new Buffer(new PositionCache(out), bufferSize));
  }
  
  public FSDataOutputStream(OutputStream out, Configuration conf)
-  throws IOException {
+    throws IOException {
    this(out, conf.getInt("io.file.buffer.size", 4096));
  }
 

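(Aside, not part of the patch: the second constructor above falls back to the "io.file.buffer.size" key, default 4096, for its buffer size. A hedged sketch of tuning it; the ByteArrayOutputStream sink is just a stand-in.)

import java.io.ByteArrayOutputStream;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;

public class BufferSizeExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.setInt("io.file.buffer.size", 65536);   // enlarge the write buffer
    FSDataOutputStream out =
      new FSDataOutputStream(new ByteArrayOutputStream(), conf);
    out.writeUTF("hello");                       // buffered before reaching the sink
    out.close();
  }
}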
+ 716 - 716
src/java/org/apache/hadoop/fs/FileSystem.java

@@ -48,812 +48,812 @@ import org.apache.hadoop.util.*;
  * @author Mike Cafarella
  *****************************************************************/
 public abstract class FileSystem extends Configured {
-    public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.FileSystem");
-
-    // cache indexed by URI scheme and authority
-    private static final Map<String,Map<String,FileSystem>> CACHE
-      = new HashMap<String,Map<String,FileSystem>>();
-    /**
-     * Parse the cmd-line args, starting at i.  Remove consumed args
-     * from array.  We expect param in the form:
-     * '-local | -dfs <namenode:port>'
-     */
-    public static FileSystem parseArgs(String argv[], int i, Configuration conf) throws IOException {
-        /**
-        if (argv.length - i < 1) {
-            throw new IOException("Must indicate filesystem type for DFS");
-        }
-        */
-        int orig = i;
-        FileSystem fs = null;
-        String cmd = argv[i];
-        if ("-dfs".equals(cmd)) {
-            i++;
-            InetSocketAddress addr = DataNode.createSocketAddr(argv[i++]);
-            fs = new DistributedFileSystem(addr, conf);
-        } else if ("-local".equals(cmd)) {
-            i++;
-            fs = FileSystem.getLocal(conf);
-        } else {
-            fs = get(conf);                          // using default
-            LOG.info("No FS indicated, using default:"+fs.getName());
-
-        }
-        System.arraycopy(argv, i, argv, orig, argv.length - i);
-        for (int j = argv.length - i; j < argv.length; j++) {
-            argv[j] = null;
-        }
-        return fs;
-    }
-
-    /** Returns the configured filesystem implementation.*/
-    public static FileSystem get(Configuration conf) throws IOException {
-      return getNamed(conf.get("fs.default.name", "local"), conf);
-    }
-
-    /** Called after a new FileSystem instance is constructed.
-     * @param name a uri whose authority section names the host, port, etc.
-     *   for this FileSystem
-     * @param conf the configuration
-     */
-    public abstract void initialize(URI name, Configuration conf)
-      throws IOException;
+  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.FileSystem");
+
+  // cache indexed by URI scheme and authority
+  private static final Map<String,Map<String,FileSystem>> CACHE
+    = new HashMap<String,Map<String,FileSystem>>();
+  /**
+   * Parse the cmd-line args, starting at i.  Remove consumed args
+   * from array.  We expect param in the form:
+   * '-local | -dfs <namenode:port>'
+   */
+  public static FileSystem parseArgs(String argv[], int i, Configuration conf) throws IOException {
+    /**
+       if (argv.length - i < 1) {
+       throw new IOException("Must indicate filesystem type for DFS");
+       }
+    */
+    int orig = i;
+    FileSystem fs = null;
+    String cmd = argv[i];
+    if ("-dfs".equals(cmd)) {
+      i++;
+      InetSocketAddress addr = DataNode.createSocketAddr(argv[i++]);
+      fs = new DistributedFileSystem(addr, conf);
+    } else if ("-local".equals(cmd)) {
+      i++;
+      fs = FileSystem.getLocal(conf);
+    } else {
+      fs = get(conf);                          // using default
+      LOG.info("No FS indicated, using default:"+fs.getName());
+
+    }
+    System.arraycopy(argv, i, argv, orig, argv.length - i);
+    for (int j = argv.length - i; j < argv.length; j++) {
+      argv[j] = null;
+    }
+    return fs;
+  }
+
+  /** Returns the configured filesystem implementation.*/
+  public static FileSystem get(Configuration conf) throws IOException {
+    return getNamed(conf.get("fs.default.name", "local"), conf);
+  }
+
+  /** Called after a new FileSystem instance is constructed.
+   * @param name a uri whose authority section names the host, port, etc.
+   *   for this FileSystem
+   * @param conf the configuration
+   */
+  public abstract void initialize(URI name, Configuration conf)
+    throws IOException;
 
-    /** Returns a URI whose scheme and authority identify this FileSystem.*/
-    public abstract URI getUri();
+  /** Returns a URI whose scheme and authority identify this FileSystem.*/
+  public abstract URI getUri();
   
-    /** @deprecated call #getUri() instead.*/
-    public abstract String getName();
-
-    /** @deprecated call #get(URI,Configuration) instead. */
-    public static FileSystem getNamed(String name, Configuration conf)
-      throws IOException {
-
-      // convert old-format name to new-format name
-      if (name.equals("local")) {         // "local" is now "file:///".
-        name = "file:///";
-      } else if (name.indexOf('/')==-1) {   // unqualified is "hdfs://"
-        name = "hdfs://"+name;
-      }
+  /** @deprecated call #getUri() instead.*/
+  public abstract String getName();
 
-      return get(URI.create(name), conf);
-    }
+  /** @deprecated call #get(URI,Configuration) instead. */
+  public static FileSystem getNamed(String name, Configuration conf)
+    throws IOException {
 
-    /**
-     * Get the local file syste
-     * @param conf the configuration to configure the file system with
-     * @return a LocalFileSystem
-     */
-    public static LocalFileSystem getLocal(Configuration conf)
-      throws IOException {
-      return (LocalFileSystem)get(LocalFileSystem.NAME, conf);
+    // convert old-format name to new-format name
+    if (name.equals("local")) {         // "local" is now "file:///".
+      name = "file:///";
+    } else if (name.indexOf('/')==-1) {   // unqualified is "hdfs://"
+      name = "hdfs://"+name;
     }
     }
 
 
-    /** Returns the FileSystem for this URI's scheme and authority.  The scheme
-     * of the URI determines a configuration property name,
-     * <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
-     * The entire URI is passed to the FileSystem instance's initialize method.
-     */
-    public static synchronized FileSystem get(URI uri, Configuration conf)
-      throws IOException {
+    return get(URI.create(name), conf);
+  }
 
-      String scheme = uri.getScheme();
-      String authority = uri.getAuthority();
+  /**
+   * Get the local file syste
+   * @param conf the configuration to configure the file system with
+   * @return a LocalFileSystem
+   */
+  public static LocalFileSystem getLocal(Configuration conf)
+    throws IOException {
+    return (LocalFileSystem)get(LocalFileSystem.NAME, conf);
+  }
+
+  /** Returns the FileSystem for this URI's scheme and authority.  The scheme
+   * of the URI determines a configuration property name,
+   * <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+   * The entire URI is passed to the FileSystem instance's initialize method.
+   */
+  public static synchronized FileSystem get(URI uri, Configuration conf)
+    throws IOException {
 
-      if (scheme == null) {                       // no scheme: use default FS
-        return get(conf);
-      }
+    String scheme = uri.getScheme();
+    String authority = uri.getAuthority();
 
-      Map<String,FileSystem> authorityToFs = CACHE.get(scheme);
-      if (authorityToFs == null) {
-        authorityToFs = new HashMap<String,FileSystem>();
-        CACHE.put(scheme, authorityToFs);
-      }
-
-      FileSystem fs = authorityToFs.get(authority);
-      if (fs == null) {
-        Class fsClass = conf.getClass("fs."+scheme+".impl", null);
-        if (fsClass == null) {
-          throw new IOException("No FileSystem for scheme: " + scheme);
-        }
-        fs = (FileSystem)ReflectionUtils.newInstance(fsClass, conf);
-        fs.initialize(uri, conf);
-        authorityToFs.put(authority, fs);
-      }
+    if (scheme == null) {                       // no scheme: use default FS
+      return get(conf);
+    }
 
-      return fs;
+    Map<String,FileSystem> authorityToFs = CACHE.get(scheme);
+    if (authorityToFs == null) {
+      authorityToFs = new HashMap<String,FileSystem>();
+      CACHE.put(scheme, authorityToFs);
    }
 
-    /**
-     * Close all cached filesystems. Be sure those filesystems are not
-     * used anymore.
-     * 
-     * @throws IOException
-     */
-    public static synchronized void closeAll() throws IOException{
-      for(Map<String, FileSystem>  fss : CACHE.values()){
-        for(FileSystem fs : fss.values()){
-          fs.close();
-        }
+    FileSystem fs = authorityToFs.get(authority);
+    if (fs == null) {
+      Class fsClass = conf.getClass("fs."+scheme+".impl", null);
+      if (fsClass == null) {
+        throw new IOException("No FileSystem for scheme: " + scheme);
+      }
+      fs = (FileSystem)ReflectionUtils.newInstance(fsClass, conf);
+      fs.initialize(uri, conf);
+      authorityToFs.put(authority, fs);
+    }
+
+    return fs;
+  }
+
+  /**
+   * Close all cached filesystems. Be sure those filesystems are not
+   * used anymore.
+   * 
+   * @throws IOException
+   */
+  public static synchronized void closeAll() throws IOException{
+    for(Map<String, FileSystem>  fss : CACHE.values()){
+      for(FileSystem fs : fss.values()){
+        fs.close();
      }
    }
+  }
 
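(Aside, not part of the patch: get(URI, Configuration) above keys instances by URI scheme and authority, so repeated lookups of the same authority share one FileSystem, and closeAll() tears the cache down. A hedged sketch; the namenode address is invented, and the fragment assumes java.net.URI plus the imports of this file.)

Configuration conf = new Configuration();
FileSystem a = FileSystem.get(URI.create("hdfs://namenode:8020/a"), conf);
FileSystem b = FileSystem.get(URI.create("hdfs://namenode:8020/b"), conf);
// a == b: the second call hits CACHE instead of calling initialize() again.
FileSystem.closeAll();   // closes every cached filesystem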
-    /** Make sure that a path specifies a FileSystem. */
-    public Path makeQualified(Path path) {
-      checkPath(path);
+  /** Make sure that a path specifies a FileSystem. */
+  public Path makeQualified(Path path) {
+    checkPath(path);
 
-      if (!path.isAbsolute())
-        path = new Path(getWorkingDirectory(), path);
+    if (!path.isAbsolute())
+      path = new Path(getWorkingDirectory(), path);
 
-      URI pathUri = path.toUri();
-      URI fsUri = getUri();
+    URI pathUri = path.toUri();
+    URI fsUri = getUri();
      
-      String scheme = pathUri.getScheme();
-      String authority = pathUri.getAuthority();
+    String scheme = pathUri.getScheme();
+    String authority = pathUri.getAuthority();
 
-      if (scheme != null &&
-          (authority != null || fsUri.getAuthority() == null))
-        return path;
+    if (scheme != null &&
+        (authority != null || fsUri.getAuthority() == null))
+      return path;
 
-      if (scheme == null) {
-        scheme = fsUri.getScheme();
-      }
+    if (scheme == null) {
+      scheme = fsUri.getScheme();
+    }
 
+    if (authority == null) {
+      authority = fsUri.getAuthority();
      if (authority == null) {
-        authority = fsUri.getAuthority();
-        if (authority == null) {
-          authority = "";
-        }
+        authority = "";
       }
       }
-
-      return new Path(scheme+":"+"//"+authority + pathUri.getPath());
-    }
-    
-    ///////////////////////////////////////////////////////////////
-    // FileSystem
-    ///////////////////////////////////////////////////////////////
-
-    protected FileSystem() {
-      super(null);
    }
 
-    /** Check that a Path belongs to this FileSystem. */
-    protected void checkPath(Path path) {
-      URI uri = path.toUri();
-      if (uri.getScheme() == null)                // fs is relative 
-        return;
-      String thisAuthority = this.getUri().getAuthority();
-      String thatAuthority = uri.getAuthority();
-      if (!(this.getUri().getScheme().equals(uri.getScheme()) &&
-            (thisAuthority == null && thatAuthority == null)
-            || thisAuthority.equals(thatAuthority)))
-        throw new IllegalArgumentException("Wrong FS: "+path+
-                                           ", expected: "+this.getUri());
-    }
-
-    /**
-     * Return a 2D array of size 1x1 or greater, containing hostnames 
-     * where portions of the given file can be found.  For a nonexistent 
-     * file or regions, null will be returned.
-     *
-     * This call is most helpful with DFS, where it returns 
-     * hostnames of machines that contain the given file.
-     *
-     * The FileSystem will simply return an elt containing 'localhost'.
-     */
-    public abstract String[][] getFileCacheHints(Path f, long start, long len) throws IOException;
-
-    /**
-     * Opens an FSDataInputStream at the indicated Path.
-     * @param f the file name to open
-     * @param bufferSize the size of the buffer to be used.
-     */
-    public abstract FSDataInputStream open(Path f, int bufferSize)
+    return new Path(scheme+":"+"//"+authority + pathUri.getPath());
+  }
+    
+  ///////////////////////////////////////////////////////////////
+  // FileSystem
+  ///////////////////////////////////////////////////////////////
+
+  protected FileSystem() {
+    super(null);
+  }
+
+  /** Check that a Path belongs to this FileSystem. */
+  protected void checkPath(Path path) {
+    URI uri = path.toUri();
+    if (uri.getScheme() == null)                // fs is relative 
+      return;
+    String thisAuthority = this.getUri().getAuthority();
+    String thatAuthority = uri.getAuthority();
+    if (!(this.getUri().getScheme().equals(uri.getScheme()) &&
+          (thisAuthority == null && thatAuthority == null)
+          || thisAuthority.equals(thatAuthority)))
+      throw new IllegalArgumentException("Wrong FS: "+path+
+                                         ", expected: "+this.getUri());
+  }
+
+  /**
+   * Return a 2D array of size 1x1 or greater, containing hostnames 
+   * where portions of the given file can be found.  For a nonexistent 
+   * file or regions, null will be returned.
+   *
+   * This call is most helpful with DFS, where it returns 
+   * hostnames of machines that contain the given file.
+   *
+   * The FileSystem will simply return an elt containing 'localhost'.
+   */
+  public abstract String[][] getFileCacheHints(Path f, long start, long len) throws IOException;
+
+  /**
+   * Opens an FSDataInputStream at the indicated Path.
+   * @param f the file name to open
+   * @param bufferSize the size of the buffer to be used.
+   */
+  public abstract FSDataInputStream open(Path f, int bufferSize)
    throws IOException;
    
-    /**
-     * Opens an FSDataInputStream at the indicated Path.
-     * @param f the file to open
-     */
-    public FSDataInputStream open(Path f) throws IOException {
-      return open(f, getConf().getInt("io.file.buffer.size", 4096));
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f) throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    getDefaultReplication(),
-                    getDefaultBlockSize());
-    }
-
-    /**
-     * Create an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    getDefaultReplication(),
-                    getDefaultBlockSize(), progress);
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f, short replication)
-      throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    replication,
-                    getDefaultBlockSize());
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f, short replication, Progressable progress)
-      throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    replication,
-                    getDefaultBlockSize(), progress);
-    }
+  /**
+   * Opens an FSDataInputStream at the indicated Path.
+   * @param f the file to open
+   */
+  public FSDataInputStream open(Path f) throws IOException {
+    return open(f, getConf().getInt("io.file.buffer.size", 4096));
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f) throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  getDefaultReplication(),
+                  getDefaultBlockSize());
+  }
+
+  /**
+   * Create an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  getDefaultReplication(),
+                  getDefaultBlockSize(), progress);
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f, short replication)
+    throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  replication,
+                  getDefaultBlockSize());
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f, short replication, Progressable progress)
+    throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  replication,
+                  getDefaultBlockSize(), progress);
+  }
 
    
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     */
-    public FSDataOutputStream create( Path f, 
-                                      boolean overwrite,
-                                      int bufferSize
-                                    ) throws IOException {
-      return create( f, overwrite, bufferSize, 
-                     getDefaultReplication(),
-                     getDefaultBlockSize());
-    }
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   */
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize
+                                   ) throws IOException {
+    return create(f, overwrite, bufferSize, 
+                  getDefaultReplication(),
+                  getDefaultBlockSize());
+  }
    
-    /**
-     * Opens an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     */
-    public FSDataOutputStream create( Path f, 
-                                      boolean overwrite,
-                                      int bufferSize,
-                                      Progressable progress
-                                    ) throws IOException {
-      return create( f, overwrite, bufferSize, 
-                     getDefaultReplication(),
-                     getDefaultBlockSize(), progress);
-    }
+  /**
+   * Opens an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   */
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize,
+                                   Progressable progress
+                                   ) throws IOException {
+    return create(f, overwrite, bufferSize, 
+                  getDefaultReplication(),
+                  getDefaultBlockSize(), progress);
+  }
    
    
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     * @param replication required block replication for the file. 
-     */
-    public FSDataOutputStream create( Path f, 
-                                      boolean overwrite,
-                                      int bufferSize,
-                                      short replication,
-                                      long blockSize
-                                    ) throws IOException {
-      return create(f, overwrite, bufferSize, replication, blockSize, null);
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     * @param replication required block replication for the file. 
-     */
-    public abstract FSDataOutputStream create( Path f, 
-                                               boolean overwrite,
-                                               int bufferSize,
-                                               short replication,
-                                               long blockSize,
-                                               Progressable progress
-                                             ) throws IOException;
-
-    /**
-     * Creates the given Path as a brand-new zero-length file.  If
-     * create fails, or if it already existed, return false.
-     */
-    public boolean createNewFile(Path f) throws IOException {
-      if (exists(f)) {
-        return false;
-      } else {
-        create(f, false, getConf().getInt("io.file.buffer.size", 4096)).close();
-        return true;
-      }
-    }
-
-    /**
-     * Get replication.
-     * 
-     * @param src file name
-     * @return file replication
-     * @throws IOException
-     */
-    public abstract short getReplication(Path src) throws IOException;
-
-    /**
-     * Set replication for an existing file.
-     * 
-     * @param src file name
-     * @param replication new replication
-     * @throws IOException
-     * @return true if successful;
-     *         false if file does not exist or is a directory
-     */
-    public abstract boolean setReplication(Path src, short replication) throws IOException;
-
-    /**
-     * Renames Path src to Path dst.  Can take place on local fs
-     * or remote DFS.
-     */
-    public abstract boolean rename(Path src, Path dst) throws IOException;
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file. 
+   */
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize,
+                                   short replication,
+                                   long blockSize
+                                   ) throws IOException {
+    return create(f, overwrite, bufferSize, replication, blockSize, null);
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file. 
+   */
+  public abstract FSDataOutputStream create(Path f, 
+                                            boolean overwrite,
+                                            int bufferSize,
+                                            short replication,
+                                            long blockSize,
+                                            Progressable progress
+                                            ) throws IOException;
+
+  /**
+   * Creates the given Path as a brand-new zero-length file.  If
+   * create fails, or if it already existed, return false.
+   */
+  public boolean createNewFile(Path f) throws IOException {
+    if (exists(f)) {
+      return false;
+    } else {
+      create(f, false, getConf().getInt("io.file.buffer.size", 4096)).close();
+      return true;
+    }
+  }
+
+  /**
+   * Get replication.
+   * 
+   * @param src file name
+   * @return file replication
+   * @throws IOException
+   */
+  public abstract short getReplication(Path src) throws IOException;
+
+  /**
+   * Set replication for an existing file.
+   * 
+   * @param src file name
+   * @param replication new replication
+   * @throws IOException
+   * @return true if successful;
+   *         false if file does not exist or is a directory
+   */
+  public abstract boolean setReplication(Path src, short replication) throws IOException;
+
+  /**
+   * Renames Path src to Path dst.  Can take place on local fs
+   * or remote DFS.
+   */
+  public abstract boolean rename(Path src, Path dst) throws IOException;
    
-    /** Delete a file */
-    public abstract boolean delete(Path f) throws IOException;
+  /** Delete a file */
+  public abstract boolean delete(Path f) throws IOException;
    
-    /** Check if exists.
-     * @param f source file
-     */
-    public abstract boolean exists(Path f) throws IOException;
-
-    /** True iff the named path is a directory. */
-    public abstract boolean isDirectory(Path f) throws IOException;
-
-    /** True iff the named path is a regular file. */
-    public boolean isFile(Path f) throws IOException {
-      if (exists(f) && ! isDirectory(f)) {
-        return true;
-      } else {
-        return false;
-      }
-    }
+  /** Check if exists.
+   * @param f source file
+   */
+  public abstract boolean exists(Path f) throws IOException;
+
+  /** True iff the named path is a directory. */
+  public abstract boolean isDirectory(Path f) throws IOException;
+
+  /** True iff the named path is a regular file. */
+  public boolean isFile(Path f) throws IOException {
+    if (exists(f) && !isDirectory(f)) {
+      return true;
+    } else {
+      return false;
+    }
+  }
    
-    /** The number of bytes in a file. */
-    public abstract long getLength(Path f) throws IOException;
+  /** The number of bytes in a file. */
+  public abstract long getLength(Path f) throws IOException;
    
-    /** Return the number of bytes of the given path 
-     * If <i>f</i> is a file, return the size of the file;
-     * If <i>f</i> is a directory, return the size of the directory tree
-     */
-    public long getContentLength(Path f) throws IOException {
-      if (!isDirectory(f)) {
-        // f is a file
-        return getLength(f);
-      }
+  /** Return the number of bytes of the given path 
+   * If <i>f</i> is a file, return the size of the file;
+   * If <i>f</i> is a directory, return the size of the directory tree
+   */
+  public long getContentLength(Path f) throws IOException {
+    if (!isDirectory(f)) {
+      // f is a file
+      return getLength(f);
+    }
      
-      // f is a diretory
-      Path[] contents = listPaths(f);
-      long size = 0;
-      for(int i=0; i<contents.length; i++) {
-        size += getContentLength(contents[i]);
-      }
-      return size;
+    // f is a diretory
+    Path[] contents = listPaths(f);
+    long size = 0;
+    for(int i=0; i<contents.length; i++) {
+      size += getContentLength(contents[i]);
    }
+    return size;
+  }
 
-    final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+  final private static PathFilter DEFAULT_FILTER = new PathFilter() {
      public boolean accept(Path file) {
        return true;
      }     
    };
    
-    /** List files in a directory. */
-    public abstract Path[] listPaths(Path f) throws IOException;
+  /** List files in a directory. */
+  public abstract Path[] listPaths(Path f) throws IOException;
    
-    /** 
-     * Filter files in the given pathes using the default checksum filter. 
-     * @param files a list of paths
-     * @return a list of files under the source paths
-     * @exception IOException
-     */
-    public Path[] listPaths(Path[] files ) throws IOException {
-      return listPaths(files, DEFAULT_FILTER);
-    }
-
-    /** Filter files in a directory. */
-    private void listPaths(ArrayList<Path> results, Path f, PathFilter filter)
-      throws IOException {
-      Path listing[] = listPaths(f);
-      if (listing != null) {
-        for (int i = 0; i < listing.length; i++) {
-          if (filter.accept(listing[i])) {
-            results.add(listing[i]);
-          }
+  /** 
+   * Filter files in the given pathes using the default checksum filter. 
+   * @param files a list of paths
+   * @return a list of files under the source paths
+   * @exception IOException
+   */
+  public Path[] listPaths(Path[] files) throws IOException {
+    return listPaths(files, DEFAULT_FILTER);
+  }
+
+  /** Filter files in a directory. */
+  private void listPaths(ArrayList<Path> results, Path f, PathFilter filter)
+    throws IOException {
+    Path listing[] = listPaths(f);
+    if (listing != null) {
+      for (int i = 0; i < listing.length; i++) {
+        if (filter.accept(listing[i])) {
+          results.add(listing[i]);
        }
-      }      
-    }
+      }
+    }      
+  }
    
-    /** Filter files in a directory. */
-    public Path[] listPaths(Path f, PathFilter filter) throws IOException {
-      ArrayList<Path> results = new ArrayList<Path>();
-      listPaths(results, f, filter);
-      return (Path[]) results.toArray(new Path[results.size()]);
-    }
+  /** Filter files in a directory. */
+  public Path[] listPaths(Path f, PathFilter filter) throws IOException {
+    ArrayList<Path> results = new ArrayList<Path>();
+    listPaths(results, f, filter);
+    return (Path[]) results.toArray(new Path[results.size()]);
+  }
    
-    /** 
-     * Filter files in a list directories using user-supplied path filter. 
-     * @param files a list of paths
-     * @return a list of files under the source paths
-     * @exception IOException
-     */
-    public Path[] listPaths(Path[] files, PathFilter filter)
+  /** 
+   * Filter files in a list directories using user-supplied path filter. 
+   * @param files a list of paths
+   * @return a list of files under the source paths
+   * @exception IOException
+   */
+  public Path[] listPaths(Path[] files, PathFilter filter)
    throws IOException {
-      ArrayList<Path> results = new ArrayList<Path>();
-      for(int i=0; i<files.length; i++) {
-        listPaths(results, files[i], filter);
-      }
-      return (Path[]) results.toArray(new Path[results.size()]);
+    ArrayList<Path> results = new ArrayList<Path>();
+    for(int i=0; i<files.length; i++) {
+      listPaths(results, files[i], filter);
    }
+    return (Path[]) results.toArray(new Path[results.size()]);
+  }
    
-    /**
-     * <p>Return all the files that match filePattern and are not checksum
-     * files. Results are sorted by their names.
-     * 
-     * <p>
-     * A filename pattern is composed of <i>regular</i> characters and
-     * <i>special pattern matching</i> characters, which are:
-     *
-     * <dl>
-     *  <dd>
-     *   <dl>
-     *    <p>
-     *    <dt> <tt> ? </tt>
-     *    <dd> Matches any single character.
-     *
-     *    <p>
-     *    <dt> <tt> * </tt>
-     *    <dd> Matches zero or more characters.
-     *
-     *    <p>
-     *    <dt> <tt> [<i>abc</i>] </tt>
-     *    <dd> Matches a single character from character set
-     *     <tt>{<i>a,b,c</i>}</tt>.
-     *
-     *    <p>
-     *    <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
-     *    <dd> Matches a single character from the character range
-     *     <tt>{<i>a...b</i>}</tt>.  Note that character <tt><i>a</i></tt> must be
-     *     lexicographically less than or equal to character <tt><i>b</i></tt>.
-     *
-     *    <p>
-     *    <dt> <tt> [^<i>a</i>] </tt>
-     *    <dd> Matches a single character that is not from character set or range
-     *     <tt>{<i>a</i>}</tt>.  Note that the <tt>^</tt> character must occur
-     *     immediately to the right of the opening bracket.
-     *
-     *    <p>
-     *    <dt> <tt> \<i>c</i> </tt>
-     *    <dd> Removes (escapes) any special meaning of character <i>c</i>.
-     *
-     *   </dl>
-     *  </dd>
-     * </dl>
-     *
-     * @param filePattern a regular expression specifying file pattern
-
-     * @return an array of paths that match the file pattern
-     * @throws IOException
-     */
-    public Path[] globPaths(Path filePattern) throws IOException {
-      return globPaths(filePattern, DEFAULT_FILTER);
-    }
+  /**
+   * <p>Return all the files that match filePattern and are not checksum
+   * files. Results are sorted by their names.
+   * 
+   * <p>
+   * A filename pattern is composed of <i>regular</i> characters and
+   * <i>special pattern matching</i> characters, which are:
+   *
+   * <dl>
+   *  <dd>
+   *   <dl>
+   *    <p>
+   *    <dt> <tt> ? </tt>
+   *    <dd> Matches any single character.
+   *
+   *    <p>
+   *    <dt> <tt> * </tt>
+   *    <dd> Matches zero or more characters.
+   *
+   *    <p>
+   *    <dt> <tt> [<i>abc</i>] </tt>
+   *    <dd> Matches a single character from character set
+   *     <tt>{<i>a,b,c</i>}</tt>.
+   *
+   *    <p>
+   *    <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+   *    <dd> Matches a single character from the character range
+   *     <tt>{<i>a...b</i>}</tt>.  Note that character <tt><i>a</i></tt> must be
+   *     lexicographically less than or equal to character <tt><i>b</i></tt>.
+   *
+   *    <p>
+   *    <dt> <tt> [^<i>a</i>] </tt>
+   *    <dd> Matches a single character that is not from character set or range
+   *     <tt>{<i>a</i>}</tt>.  Note that the <tt>^</tt> character must occur
+   *     immediately to the right of the opening bracket.
+   *
+   *    <p>
+   *    <dt> <tt> \<i>c</i> </tt>
+   *    <dd> Removes (escapes) any special meaning of character <i>c</i>.
+   *
+   *   </dl>
+   *  </dd>
+   * </dl>
+   *
+   * @param filePattern a regular expression specifying file pattern
+
+   * @return an array of paths that match the file pattern
+   * @throws IOException
+   */
+  public Path[] globPaths(Path filePattern) throws IOException {
+    return globPaths(filePattern, DEFAULT_FILTER);
+  }
    
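(Aside, not part of the patch: a hedged example of the glob syntax documented above; the directory layout is invented, and the fragment assumes this file's imports.)

// Per the javadoc above, results are sorted by name and checksum files are
// excluded; this would match e.g. /logs/2007-04-16/part-0.
FileSystem fs = FileSystem.get(new Configuration());
Path[] matches = fs.globPaths(new Path("/logs/2007-04-[0-9][0-9]/part-*"));
for (int i = 0; i < matches.length; i++) {
  System.out.println(matches[i]);
}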
-    /** glob all the file names that matches filePattern
-     * and is accepted by filter.
-     */
-    public Path[] globPaths(Path filePattern, PathFilter filter) 
-        throws IOException {
-      Path [] parents = new Path[1];
-      int level = 0;
-      String filename = filePattern.toUri().getPath();
-      if("".equals(filename) || Path.SEPARATOR.equals(filename)) {
-        parents[0] = filePattern;
-        return parents;
-      }
-      
-      String [] components = filename.split(Path.SEPARATOR);
-      if(filePattern.isAbsolute()) {
-        parents[0] = new Path(Path.SEPARATOR);
-        level = 1;
-      } else {
-        parents[0] = new Path( "" );
-      }
+  /** glob all the file names that matches filePattern
+   * and is accepted by filter.
+   */
+  public Path[] globPaths(Path filePattern, PathFilter filter) 
+    throws IOException {
+    Path [] parents = new Path[1];
+    int level = 0;
+    String filename = filePattern.toUri().getPath();
+    if ("".equals(filename) || Path.SEPARATOR.equals(filename)) {
+      parents[0] = filePattern;
+      return parents;
+    }
      
-      Path[] results = globPathsLevel(parents, components, level, filter);
-      Arrays.sort(results);
-      return results;
+    String [] components = filename.split(Path.SEPARATOR);
+    if (filePattern.isAbsolute()) {
+      parents[0] = new Path(Path.SEPARATOR);
+      level = 1;
+    } else {
+      parents[0] = new Path("");
     }
     }
+      
+    Path[] results = globPathsLevel(parents, components, level, filter);
+    Arrays.sort(results);
+    return results;
+  }
    
-    private Path[] globPathsLevel(Path[] parents,
-        String [] filePattern, int level, PathFilter filter) throws IOException {
-      if (level == filePattern.length)
-        return parents;
-      GlobFilter fp = new GlobFilter(filePattern[level], filter);
-      if( fp.hasPattern()) {
-        parents = listPaths(parents, fp);
-      } else {
-        for(int i=0; i<parents.length; i++) {
-          parents[i] = new Path(parents[i], filePattern[level]);
-        }
+  private Path[] globPathsLevel(Path[] parents,
+                                String [] filePattern, int level, PathFilter filter) throws IOException {
+    if (level == filePattern.length)
+      return parents;
+    GlobFilter fp = new GlobFilter(filePattern[level], filter);
+    if (fp.hasPattern()) {
+      parents = listPaths(parents, fp);
+    } else {
+      for(int i=0; i<parents.length; i++) {
+        parents[i] = new Path(parents[i], filePattern[level]);
      }
-      return globPathsLevel(parents, filePattern, level+1, filter);      
    }
+    return globPathsLevel(parents, filePattern, level+1, filter);      
+  }
 
-    private static class GlobFilter implements PathFilter {
-      private PathFilter userFilter = DEFAULT_FILTER;
-      private Pattern regex;
-      private boolean hasPattern = false;
+  private static class GlobFilter implements PathFilter {
+    private PathFilter userFilter = DEFAULT_FILTER;
+    private Pattern regex;
+    private boolean hasPattern = false;
      
-      /** Default pattern character: Escape any special meaning. */
-      private static final char  PAT_ESCAPE = '\\';
-      /** Default pattern character: Any single character. */
-      private static final char  PAT_ANY = '.';
-      /** Default pattern character: Character set close. */
-      private static final char  PAT_SET_CLOSE = ']';
+    /** Default pattern character: Escape any special meaning. */
+    private static final char  PAT_ESCAPE = '\\';
+    /** Default pattern character: Any single character. */
+    private static final char  PAT_ANY = '.';
+    /** Default pattern character: Character set close. */
+    private static final char  PAT_SET_CLOSE = ']';
      
-      GlobFilter() {
-      }
+    GlobFilter() {
+    }
      
-      GlobFilter(String filePattern) throws IOException {
-        setRegex(filePattern);
-      }
+    GlobFilter(String filePattern) throws IOException {
+      setRegex(filePattern);
+    }
      
-      GlobFilter(String filePattern, PathFilter filter) throws IOException {
-        userFilter = filter;
-        setRegex(filePattern);
-      }
+    GlobFilter(String filePattern, PathFilter filter) throws IOException {
+      userFilter = filter;
+      setRegex(filePattern);
+    }
      
-      void setRegex(String filePattern) throws IOException {
-        int len;
-        int setOpen;
-        boolean setRange;
-        StringBuffer fileRegex = new StringBuffer();
-
-        // Validate the pattern
-        len = filePattern.length();
-        if (len == 0)
-          return;
-
-        setOpen = 0;
-        setRange = false;
+    void setRegex(String filePattern) throws IOException {
+      int len;
+      int setOpen;
+      boolean setRange;
+      StringBuffer fileRegex = new StringBuffer();
+
+      // Validate the pattern
+      len = filePattern.length();
+      if (len == 0)
+        return;
+
+      setOpen = 0;
+      setRange = false;
        
-        for (int i = 0; i < len; i++) {
-          char pCh;
+      for (int i = 0; i < len; i++) {
+        char pCh;
          
-          // Examine a single pattern character
-          pCh = filePattern.charAt(i);
-          if (pCh == PAT_ESCAPE) {
-            fileRegex.append(pCh);
-            i++;
-            if (i >= len)
-              error("An escaped character does not present", filePattern, i);
-            pCh = filePattern.charAt(i);
-          } else if (pCh == '.') {
-            fileRegex.append(PAT_ESCAPE);
-          } else if (pCh == '*') {
-            fileRegex.append(PAT_ANY);
-            hasPattern = true;
-          } else if (pCh == '?') {
-            pCh = PAT_ANY;
-            hasPattern = true;
-          } else if (pCh == '[' && setOpen == 0) {
-            setOpen++;
-            hasPattern = true;
-          } else if (pCh == '^' && setOpen > 0) {
-          } else if (pCh == '-' && setOpen > 0) {
-            // Character set range
-            setRange = true;
-          } else if (pCh == PAT_SET_CLOSE && setRange) {
-            // Incomplete character set range
-            error("Incomplete character set range", filePattern, i);
-          } else if (pCh == PAT_SET_CLOSE && setOpen > 0) {
-            // End of a character set
-            if (setOpen < 2)
-              error("Unexpected end of set", filePattern, i);
-            setOpen = 0;
-          } else if (setOpen > 0) {
-            // Normal character, or the end of a character set range
-            setOpen++;
-            setRange = false;
-          }
+        // Examine a single pattern character
+        pCh = filePattern.charAt(i);
+        if (pCh == PAT_ESCAPE) {
          fileRegex.append(pCh);
+          i++;
+          if (i >= len)
+            error("An escaped character does not present", filePattern, i);
+          pCh = filePattern.charAt(i);
+        } else if (pCh == '.') {
+          fileRegex.append(PAT_ESCAPE);
+        } else if (pCh == '*') {
+          fileRegex.append(PAT_ANY);
+          hasPattern = true;
+        } else if (pCh == '?') {
+          pCh = PAT_ANY;
+          hasPattern = true;
+        } else if (pCh == '[' && setOpen == 0) {
+          setOpen++;
+          hasPattern = true;
+        } else if (pCh == '^' && setOpen > 0) {
+        } else if (pCh == '-' && setOpen > 0) {
+          // Character set range
+          setRange = true;
+        } else if (pCh == PAT_SET_CLOSE && setRange) {
+          // Incomplete character set range
+          error("Incomplete character set range", filePattern, i);
+        } else if (pCh == PAT_SET_CLOSE && setOpen > 0) {
+          // End of a character set
+          if (setOpen < 2)
+            error("Unexpected end of set", filePattern, i);
+          setOpen = 0;
+        } else if (setOpen > 0) {
+          // Normal character, or the end of a character set range
+          setOpen++;
+          setRange = false;
        }
+        fileRegex.append(pCh);
+      }
        
-        // Check for a well-formed pattern
-        if (setOpen > 0 || setRange) {
-          // Incomplete character set or character range
-          error("Expecting set closure character or end of range", filePattern,
+      // Check for a well-formed pattern
+      if (setOpen > 0 || setRange) {
+        // Incomplete character set or character range
+        error("Expecting set closure character or end of range", filePattern,
               len);
               len);
-        }
-        regex = Pattern.compile(fileRegex.toString());
      }
+      regex = Pattern.compile(fileRegex.toString());
+    }
      
-      boolean hasPattern() {
-        return hasPattern;
-      }
+    boolean hasPattern() {
+      return hasPattern;
+    }
      
-      public boolean accept(Path path) {
-        return regex.matcher(path.getName()).matches() && userFilter.accept(path);
-      }
+    public boolean accept(Path path) {
+      return regex.matcher(path.getName()).matches() && userFilter.accept(path);
+    }
      
-      private void error(String s, String pattern, int pos) throws IOException {
-        throw new IOException("Illegal file pattern: "
-                                 +s+ " for glob "+ pattern + " at " + pos);
-      }
+    private void error(String s, String pattern, int pos) throws IOException {
+      throw new IOException("Illegal file pattern: "
+                            +s+ " for glob "+ pattern + " at " + pos);
     }
     }
+  }
    
-    /**
-     * Set the current working directory for the given file system. All relative
-     * paths will be resolved relative to it.
-     * 
-     * @param new_dir
-     */
-    public abstract void setWorkingDirectory(Path new_dir);
+  /**
+   * Set the current working directory for the given file system. All relative
+   * paths will be resolved relative to it.
+   * 
+   * @param new_dir
+   */
+  public abstract void setWorkingDirectory(Path new_dir);
    
-    /**
-     * Get the current working directory for the given file system
-     * @return the directory pathname
-     */
-    public abstract Path getWorkingDirectory();
+  /**
+   * Get the current working directory for the given file system
+   * @return the directory pathname
+   */
+  public abstract Path getWorkingDirectory();
    
-    /**
-     * Make the given file and all non-existent parents into
-     * directories. Has the semantics of Unix 'mkdir -p'.
-     * Existence of the directory hierarchy is not an error.
-     */
-    public abstract boolean mkdirs(Path f) throws IOException;
-
-    /**
-     * Obtain a lock on the given Path
-     * 
-     * @deprecated FS does not support file locks anymore.
-     */
-    @Deprecated
-    public abstract void lock(Path f, boolean shared) throws IOException;
-
-    /**
-     * Release the lock
-     * 
-     * @deprecated FS does not support file locks anymore.     
-     */
-    @Deprecated
-    public abstract void release(Path f) throws IOException;
-
-    /**
-     * The src file is on the local disk.  Add it to FS at
-     * the given dst name and the source is kept intact afterwards
-     */
-    public void copyFromLocalFile(Path src, Path dst)
+  /**
+   * Make the given file and all non-existent parents into
+   * directories. Has the semantics of Unix 'mkdir -p'.
+   * Existence of the directory hierarchy is not an error.
+   */
+  public abstract boolean mkdirs(Path f) throws IOException;
+
+  /**
+   * Obtain a lock on the given Path
+   * 
+   * @deprecated FS does not support file locks anymore.
+   */
+  @Deprecated
+  public abstract void lock(Path f, boolean shared) throws IOException;
+
+  /**
+   * Release the lock
+   * 
+   * @deprecated FS does not support file locks anymore.     
+   */
+  @Deprecated
+  public abstract void release(Path f) throws IOException;
+
+  /**
+   * The src file is on the local disk.  Add it to FS at
+   * the given dst name and the source is kept intact afterwards
+   */
+  public void copyFromLocalFile(Path src, Path dst)
    throws IOException {
-      copyFromLocalFile(false, src, dst);
-    }
-
-    /**
-     * The src file is on the local disk.  Add it to FS at
-     * the given dst name, removing the source afterwards.
-     */
-    public void moveFromLocalFile(Path src, Path dst)
+    copyFromLocalFile(false, src, dst);
+  }
+
+  /**
+   * The src file is on the local disk.  Add it to FS at
+   * the given dst name, removing the source afterwards.
+   */
+  public void moveFromLocalFile(Path src, Path dst)
    throws IOException {
-      copyFromLocalFile(true, src, dst);
-    }
-
-    /**
-     * The src file is on the local disk.  Add it to FS at
-     * the given dst name.
-     * delSrc indicates if the source should be removed
-     */
-    public abstract void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    copyFromLocalFile(true, src, dst);
+  }
+
+  /**
+   * The src file is on the local disk.  Add it to FS at
+   * the given dst name.
+   * delSrc indicates if the source should be removed
+   */
+  public abstract void copyFromLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException;
    
-    /**
-     * The src file is under FS, and the dst is on the local disk.
-     * Copy it from FS control to the local dst name.
-     */
-    public void copyToLocalFile(Path src, Path dst) throws IOException {
-      copyToLocalFile(false, src, dst);
-    }
+  /**
+   * The src file is under FS, and the dst is on the local disk.
+   * Copy it from FS control to the local dst name.
+   */
+  public void copyToLocalFile(Path src, Path dst) throws IOException {
+    copyToLocalFile(false, src, dst);
+  }
    
-    /**
-     * The src file is under FS, and the dst is on the local disk.
-     * Copy it from FS control to the local dst name.
-     * Remove the source afterwards
-     */
-    public void moveToLocalFile(Path src, Path dst) throws IOException {
-      copyToLocalFile(true, src, dst);
-    }
-
-    /**
-     * The src file is under FS, and the dst is on the local disk.
-     * Copy it from FS control to the local dst name.
-     * delSrc indicates if the src will be removed or not.
-     */   
-    public abstract void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  /**
+   * The src file is under FS, and the dst is on the local disk.
+   * Copy it from FS control to the local dst name.
+   * Remove the source afterwards
+   */
+  public void moveToLocalFile(Path src, Path dst) throws IOException {
+    copyToLocalFile(true, src, dst);
+  }
+
+  /**
+   * The src file is under FS, and the dst is on the local disk.
+   * Copy it from FS control to the local dst name.
+   * delSrc indicates if the src will be removed or not.
+   */   
+  public abstract void copyToLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException;

-    /**
-     * Returns a local File that the user can write output to.  The caller
-     * provides both the eventual FS target name and the local working
-     * file.  If the FS is local, we write directly into the target.  If
-     * the FS is remote, we write into the tmp local area.
-     */
-    public abstract Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
-
-    /**
-     * Called when we're all done writing to the target.  A local FS will
-     * do nothing, because we've written to exactly the right place.  A remote
-     * FS will copy the contents of tmpLocalFile to the correct target at
-     * fsOutputFile.
-     */
-    public abstract void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
-
-    /**
-     * No more filesystem operations are needed.  Will
-     * release any held locks.
-     */
-    public void close() throws IOException {
-      URI uri = getUri();
-      synchronized (FileSystem.class) {
-        Map<String,FileSystem> authorityToFs = CACHE.get(uri.getScheme());
-        if (authorityToFs != null) {
-          authorityToFs.remove(uri.getAuthority());
-        }
+  /**
+   * Returns a local File that the user can write output to.  The caller
+   * provides both the eventual FS target name and the local working
+   * file.  If the FS is local, we write directly into the target.  If
+   * the FS is remote, we write into the tmp local area.
+   */
+  public abstract Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
+
+  /**
+   * Called when we're all done writing to the target.  A local FS will
+   * do nothing, because we've written to exactly the right place.  A remote
+   * FS will copy the contents of tmpLocalFile to the correct target at
+   * fsOutputFile.
+   */
+  public abstract void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
+
+  /**
+   * No more filesystem operations are needed.  Will
+   * release any held locks.
+   */
+  public void close() throws IOException {
+    URI uri = getUri();
+    synchronized (FileSystem.class) {
+      Map<String,FileSystem> authorityToFs = CACHE.get(uri.getScheme());
+      if (authorityToFs != null) {
+        authorityToFs.remove(uri.getAuthority());
      }
    }
+  }

-    /** Return the total size of all files in the filesystem.*/
-    public long getUsed() throws IOException{
-      long used = 0;
-      Path[] files = listPaths(new Path("/"));
-      for(Path file:files){
-        used += getContentLength(file);
-      }
-      return used;
+  /** Return the total size of all files in the filesystem.*/
+  public long getUsed() throws IOException{
+    long used = 0;
+    Path[] files = listPaths(new Path("/"));
+    for(Path file:files){
+      used += getContentLength(file);
    }
+    return used;
+  }

-    /**
-     * Get the block size for a particular file.
-     * @param f the filename
-     * @return the number of bytes in a block
-     */
-    public abstract long getBlockSize(Path f) throws IOException;
+  /**
+   * Get the block size for a particular file.
+   * @param f the filename
+   * @return the number of bytes in a block
+   */
+  public abstract long getBlockSize(Path f) throws IOException;
    
-    /** Return the number of bytes that large input files should be optimally
-     * be split into to minimize i/o time. */
-    public long getDefaultBlockSize() {
-      // default to 32MB: large enough to minimize the impact of seeks
-      return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
-    }
+  /** Return the number of bytes that large input files should be optimally
+   * be split into to minimize i/o time. */
+  public long getDefaultBlockSize() {
+    // default to 32MB: large enough to minimize the impact of seeks
+    return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
+  }
    
-    /**
-     * Get the default replication.
-     */
-    public abstract short getDefaultReplication();
+  /**
+   * Get the default replication.
+   */
+  public abstract short getDefaultReplication();

}

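A note on the FileSystem helpers above: the copy/move pairs differ only in the delSrc flag they forward to the abstract copyFromLocalFile/copyToLocalFile. A minimal caller sketch against this API (class name and paths are hypothetical, not part of the patch):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyLocalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);           // FS chosen by fs.default.name
    Path src = new Path("/tmp/report.txt");         // hypothetical local source
    Path dst = new Path("/user/demo/report.txt");   // hypothetical FS destination
    fs.mkdirs(dst.getParent());                     // 'mkdir -p' semantics, per the javadoc above
    fs.copyFromLocalFile(src, dst);                 // delSrc == false: source kept
    // fs.moveFromLocalFile(src, dst);              // delSrc == true: source removed
    fs.close();                                     // also evicts this FS from the scheme/authority cache
  }
}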
+ 21 - 21
src/java/org/apache/hadoop/fs/FileUtil.java

@@ -40,7 +40,7 @@ public class FileUtil {
    if (contents != null) {
      for (int i = 0; i < contents.length; i++) {
        if (contents[i].isFile()) {
-          if (! contents[i].delete()) {
+          if (!contents[i].delete()) {
            return false;
          }
        } else {
@@ -54,7 +54,7 @@ public class FileUtil {
          }
          // if not an empty directory or symlink let
          // fullydelete handle it.
-          if (! fullyDelete(contents[i])) {
+          if (!fullyDelete(contents[i])) {
            return false;
          }
        }
@@ -67,7 +67,7 @@ public class FileUtil {
  public static boolean copy(FileSystem srcFS, Path src, 
                             FileSystem dstFS, Path dst, 
                             boolean deleteSource,
-                             Configuration conf ) throws IOException {
+                             Configuration conf) throws IOException {
    dst = checkDest(src.getName(), dstFS, dst);

    if (srcFS.isDirectory(src)) {
@@ -117,7 +117,7 @@ public class FileUtil {
          InputStream in = srcFS.open(contents[i]);
          try {
            copyContent(in, out, conf, false);
-            if(addString!=null)
+            if (addString!=null)
              out.write(addString.getBytes("UTF-8"));
                
          } finally {
@@ -141,7 +141,7 @@ public class FileUtil {
  public static boolean copy(File src,
                             FileSystem dstFS, Path dst,
                             boolean deleteSource,
-                             Configuration conf ) throws IOException {
+                             Configuration conf) throws IOException {
    dst = checkDest(src.getName(), dstFS, dst);

    if (src.isDirectory()) {
@@ -171,7 +171,7 @@ public class FileUtil {
  /** Copy FileSystem files to local files. */
  public static boolean copy(FileSystem srcFS, Path src, 
                             File dst, boolean deleteSource,
-                             Configuration conf ) throws IOException {
+                             Configuration conf) throws IOException {

    dst = checkDest(src.getName(), dst);

@@ -215,7 +215,7 @@ public class FileUtil {
        bytesRead = in.read(buf);
      }
    } finally {
-      if(close)
+      if (close)
        out.close();
    }
  }
@@ -284,7 +284,7 @@ public class FileUtil {
  public static void unZip(File inFile, File unzipDir) throws IOException {
    Enumeration entries;
    ZipFile zipFile = new ZipFile(inFile);
-    ;
+
    try {
      entries = zipFile.entries();
      while (entries.hasMoreElements()) {
@@ -334,44 +334,44 @@ public class FileUtil {
    private static String[] hardLinkCommand;
    
    static {
-      switch( getOSType() ) {
+      switch(getOSType()) {
      case OS_TYPE_WINXP:
-        hardLinkCommand = new String[] {"fsutil","hardlink","create",null,null};
+        hardLinkCommand = new String[] {"fsutil","hardlink","create", null, null};
        break;
      case OS_TYPE_UNIX:
      default:
-        hardLinkCommand = new String[] {"ln",null,null};
+        hardLinkCommand = new String[] {"ln", null, null};
      }
    }

    static OSType getOSType() {
      String osName = System.getProperty("os.name");
-      if( osName.indexOf( "Windows") >= 0 && 
-          (osName.indexOf( "XpP") >= 0 || osName.indexOf( "2003") >= 0 ) )
+      if (osName.indexOf("Windows") >= 0 && 
+          (osName.indexOf("XpP") >= 0 || osName.indexOf("2003") >= 0))
        return OSType.OS_TYPE_WINXP;
      else
        return OSType.OS_TYPE_UNIX;
    }
    
    public static void createHardLink(File target, 
-                                      File linkName ) throws IOException {
+                                      File linkName) throws IOException {
      int len = hardLinkCommand.length;
      hardLinkCommand[len-2] = target.getCanonicalPath();
      hardLinkCommand[len-1] = linkName.getCanonicalPath();
      // execute shell command
-      Process process = Runtime.getRuntime().exec( hardLinkCommand );
+      Process process = Runtime.getRuntime().exec(hardLinkCommand);
      try {
        if (process.waitFor() != 0) {
          String errMsg = new BufferedReader(new InputStreamReader(
                                                                   process.getInputStream())).readLine();
-          if( errMsg == null )  errMsg = "";
+          if (errMsg == null)  errMsg = "";
          String inpMsg = new BufferedReader(new InputStreamReader(
                                                                   process.getErrorStream())).readLine();
-          if( inpMsg == null )  inpMsg = "";
-          throw new IOException( errMsg + inpMsg );
+          if (inpMsg == null)  inpMsg = "";
+          throw new IOException(errMsg + inpMsg);
        }
      } catch (InterruptedException e) {
-        throw new IOException( StringUtils.stringifyException( e ));
+        throw new IOException(StringUtils.stringifyException(e));
      } finally {
        process.destroy();
      }
@@ -387,7 +387,7 @@ public class FileUtil {
   */
  public static int symLink(String target, String linkname) throws IOException{
    String cmd = "ln -s " + target + " " + linkname;
-    Process p = Runtime.getRuntime().exec( cmd, null );
+    Process p = Runtime.getRuntime().exec(cmd, null);
    int returnVal = -1;
    try{
      returnVal = p.waitFor();
@@ -408,7 +408,7 @@ public class FileUtil {
  public static int chmod(String filename, String perm
                          ) throws IOException, InterruptedException {
    String cmd = "chmod " + perm + " " + filename;
-    Process p = Runtime.getRuntime().exec( cmd, null );
+    Process p = Runtime.getRuntime().exec(cmd, null);
    return p.waitFor();
  }
}

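createHardLink, symLink, and chmod above all follow one pattern: shell out via Runtime.exec, check waitFor(), and surface a line of process output on failure. A self-contained sketch of that pattern (class name, helper, and the sample command are ours, not FileUtil's):

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class ExecExample {
  /** Run a command; throw with the first line of stderr if it exits non-zero. */
  static int run(String[] cmd) throws IOException {
    Process p = Runtime.getRuntime().exec(cmd);
    try {
      int rc = p.waitFor();
      if (rc != 0) {
        String err = new BufferedReader(
            new InputStreamReader(p.getErrorStream())).readLine();
        throw new IOException("exit " + rc + (err == null ? "" : ": " + err));
      }
      return rc;
    } catch (InterruptedException e) {
      throw new IOException(e.toString());
    } finally {
      p.destroy();   // mirrors createHardLink's cleanup above
    }
  }

  public static void main(String[] args) throws IOException {
    run(new String[] {"ln", "-s", "/tmp/target", "/tmp/link"});  // hypothetical paths
  }
}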
+ 11 - 11
src/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -44,7 +44,7 @@ public class FilterFileSystem extends FileSystem {
  
  protected FileSystem fs;
  
-  public FilterFileSystem( FileSystem fs) {
+  public FilterFileSystem(FileSystem fs) {
    this.fs = fs;
  }

@@ -114,13 +114,13 @@ public class FilterFileSystem extends FileSystem {
   * @param bufferSize the size of the buffer to be used.
   * @param replication required block replication for the file. 
   */
-  public FSDataOutputStream create( Path f, 
-                                    boolean overwrite,
-                                    int bufferSize,
-                                    short replication,
-                                    long blockSize,
-                                    Progressable progress
-                                    ) throws IOException {
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize,
+                                   short replication,
+                                   long blockSize,
+                                   Progressable progress
+                                   ) throws IOException {
    return fs.create(f, overwrite, bufferSize, replication, blockSize, progress);
  }

@@ -217,7 +217,7 @@ public class FilterFileSystem extends FileSystem {
   * @deprecated FS does not support file locks anymore.
   */
  @Deprecated
-    public void lock(Path f, boolean shared) throws IOException {
+  public void lock(Path f, boolean shared) throws IOException {
    fs.lock(f, shared);
  }

@@ -227,7 +227,7 @@ public class FilterFileSystem extends FileSystem {
   * @deprecated FS does not support file locks anymore.     
   */
  @Deprecated
-    public void release(Path f) throws IOException {
+  public void release(Path f) throws IOException {
    fs.release(f);
  }

@@ -296,7 +296,7 @@ public class FilterFileSystem extends FileSystem {
  }

  @Override
-    public Configuration getConf() {
+  public Configuration getConf() {
    return fs.getConf();
  }
}

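FilterFileSystem is a decorator: every method forwards to the protected fs field, so a subclass can intercept a single call and inherit delegation for the rest. A hypothetical sketch, assuming the era's open(Path, int) signature used elsewhere in this patch:

import java.io.IOException;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.FilterFileSystem;
import org.apache.hadoop.fs.Path;

/** Hypothetical decorator: counts open() calls, delegates everything else. */
public class CountingFileSystem extends FilterFileSystem {
  private int opens = 0;

  public CountingFileSystem(FileSystem fs) {
    super(fs);
  }

  @Override
  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
    opens++;                         // the only added behavior
    return fs.open(f, bufferSize);   // everything else still delegates
  }

  public int getOpenCount() {
    return opens;
  }
}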
+ 54 - 54
src/java/org/apache/hadoop/fs/FsShell.java

@@ -121,7 +121,7 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem.globPaths 
   */
  void copyToLocal(String[]argv, int pos) throws IOException {
-    if(argv.length-pos<2 || (argv.length-pos==2 && argv[pos].equalsIgnoreCase("-crc"))) {
+    if (argv.length-pos<2 || (argv.length-pos==2 && argv[pos].equalsIgnoreCase("-crc"))) {
      System.err.println("Usage: -get [-crc] <src> <dst>");
      System.exit(-1);
    }
@@ -132,19 +132,19 @@ public class FsShell extends ToolBase {
    }
    String srcf = argv[pos++];
    String dstf = argv[pos++];
-    if( dstf.equals("-")) {
+    if (dstf.equals("-")) {
      if (copyCrc) {
        System.err.println("-crc option is not valid when destination is stdout.");
      }
      cat(srcf);
    } else {
-      Path [] srcs = fs.globPaths( new Path(srcf) );
-      if( srcs.length > 1 && !new File( dstf ).isDirectory()) {
-        throw new IOException( "When copying multiple files, " 
-                               + "destination should be a directory." );
+      Path [] srcs = fs.globPaths(new Path(srcf));
+      if (srcs.length > 1 && !new File(dstf).isDirectory()) {
+        throw new IOException("When copying multiple files, " 
+                              + "destination should be a directory.");
      }
-      Path dst = new Path( dstf );
-      for( int i=0; i<srcs.length; i++ ) {
+      Path dst = new Path(dstf);
+      for(int i=0; i<srcs.length; i++) {
        ((DistributedFileSystem)fs).copyToLocalFile(srcs[i], dst, copyCrc);
      }
    }
@@ -178,9 +178,9 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem.globPaths 
   */
  void copyMergeToLocal(String srcf, Path dst, boolean endline) throws IOException {
-    Path [] srcs = fs.globPaths( new Path( srcf ) );
-    for( int i=0; i<srcs.length; i++ ) {
-      if(endline) {
+    Path [] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
+      if (endline) {
        FileUtil.copyMerge(fs, srcs[i], 
                           FileSystem.getLocal(conf), dst, false, conf, "\n");
      } else {
@@ -206,8 +206,8 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem.globPaths 
   */
  void cat(String srcf) throws IOException {
-    Path [] srcs = fs.globPaths( new Path( srcf ) );
-    for( int i=0; i<srcs.length; i++ ) {
+    Path [] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
      printToStdout(srcs[i]);
    }
  }
@@ -219,7 +219,7 @@ public class FsShell extends ToolBase {
   * @throws IOException 
   */
  private void setReplication(String[] cmd, int pos) throws IOException {
-    if(cmd.length-pos<2 || (cmd.length-pos==2 && cmd[pos].equalsIgnoreCase("-R"))) {
+    if (cmd.length-pos<2 || (cmd.length-pos==2 && cmd[pos].equalsIgnoreCase("-R"))) {
      System.err.println("Usage: [-R] <repvalue> <path>");
      System.exit(-1);
    }
@@ -227,7 +227,7 @@ public class FsShell extends ToolBase {
    boolean recursive = false;
    short rep = 3;
      
-    if("-R".equalsIgnoreCase(cmd[pos])) {
+    if ("-R".equalsIgnoreCase(cmd[pos])) {
      recursive=true;
      pos++;
        
@@ -256,16 +256,16 @@ public class FsShell extends ToolBase {
   */
  public void setReplication(short newRep, String srcf, boolean recursive)
    throws IOException {
-    Path[] srcs = fs.globPaths( new Path(srcf) );
-    for( int i=0; i<srcs.length; i++ ) {
-      setReplication( newRep, srcs[i], recursive );
+    Path[] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
+      setReplication(newRep, srcs[i], recursive);
    }
  }
    
  private void setReplication(short newRep, Path src, boolean recursive)
    throws IOException {
  	
-    if(!fs.isDirectory(src)) {
+    if (!fs.isDirectory(src)) {
      setFileReplication(src, newRep);
      return;
    }
@@ -277,9 +277,9 @@ public class FsShell extends ToolBase {
      for (int i = 0; i < items.length; i++) {
        Path cur = items[i];
-        if(!fs.isDirectory(cur)) {
+        if (!fs.isDirectory(cur)) {
          setFileReplication(cur, newRep);
-        } else if(recursive) {
+        } else if (recursive) {
          setReplication(newRep, cur, recursive);
        }
      }
@@ -295,7 +295,7 @@ public class FsShell extends ToolBase {
   */
  private void setFileReplication(Path file, short newRep) throws IOException {
    	
-    if(fs.setReplication(file, newRep)) {
+    if (fs.setReplication(file, newRep)) {
      System.out.println("Replication " + newRep + " set: " + file);
    } else {
      System.err.println("Could not set replication for: " + file);
@@ -311,7 +311,7 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
   */
  public void ls(String srcf, boolean recursive) throws IOException {
-    Path[] srcs = fs.globPaths( new Path(srcf) );
+    Path[] srcs = fs.globPaths(new Path(srcf));
    boolean printHeader = (srcs.length == 1) ? true: false;
    for(int i=0; i<srcs.length; i++) {
      ls(srcs[i], recursive, printHeader);
@@ -319,12 +319,12 @@ public class FsShell extends ToolBase {
  }

  /* list all files under the directory <i>src</i>*/
-  private void ls(Path src, boolean recursive, boolean printHeader ) throws IOException {
+  private void ls(Path src, boolean recursive, boolean printHeader) throws IOException {
    Path items[] = fs.listPaths(src);
    if (items == null) {
      throw new IOException("Could not get listing for " + src);
    } else {
-      if(!recursive && printHeader ) {
+      if (!recursive && printHeader) {
        System.out.println("Found " + items.length + " items");
      }
      for (int i = 0; i < items.length; i++) {
@@ -334,7 +334,7 @@ public class FsShell extends ToolBase {
                               "<dir>" : 
                               "<dir>" : 
                               ("<r " + fs.getReplication(cur) 
                               ("<r " + fs.getReplication(cur) 
                                + ">\t" + fs.getLength(cur))));
                                + ">\t" + fs.getLength(cur))));
-        if(recursive && fs.isDirectory(cur)) {
+        if (recursive && fs.isDirectory(cur)) {
           ls(cur, recursive, printHeader);
           ls(cur, recursive, printHeader);
         }
         }
       }
       }
@@ -348,7 +348,7 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
   */
  public void du(String src) throws IOException {
-    Path items[] = fs.listPaths( fs.globPaths( new Path(src) ) );
+    Path items[] = fs.listPaths(fs.globPaths(new Path(src)));
    if (items == null) {
      throw new IOException("Could not get listing for " + src);
    } else {
@@ -368,12 +368,12 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
   */
  public void dus(String src) throws IOException {
-    Path paths[] = fs.globPaths( new Path(src) );
-    if( paths==null && paths.length==0 ) {
-      throw new IOException( "dus: No match: " + src );
+    Path paths[] = fs.globPaths(new Path(src));
+    if (paths==null && paths.length==0) {
+      throw new IOException("dus: No match: " + src);
    }
    for(int i=0; i<paths.length; i++) {
-      Path items[] = fs.listPaths( paths[i] );
+      Path items[] = fs.listPaths(paths[i]);
      if (items != null) {
        long totalSize=0;
        for(int j=0; j<items.length; j++) {
@@ -407,13 +407,13 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
   */
  public void rename(String srcf, String dstf) throws IOException {
-    Path [] srcs = fs.globPaths( new Path(srcf) );
+    Path [] srcs = fs.globPaths(new Path(srcf));
    Path dst = new Path(dstf);
-    if( srcs.length > 1 && !fs.isDirectory(dst)) {
-      throw new IOException( "When moving multiple files, " 
-                             + "destination should be a directory." );
+    if (srcs.length > 1 && !fs.isDirectory(dst)) {
+      throw new IOException("When moving multiple files, " 
+                            + "destination should be a directory.");
    }
-    for( int i=0; i<srcs.length; i++ ) {
+    for(int i=0; i<srcs.length; i++) {
      if (fs.rename(srcs[i], dst)) {
        System.out.println("Renamed " + srcs[i] + " to " + dstf);
      } else {
@@ -442,8 +442,8 @@ public class FsShell extends ToolBase {
    if (argv.length > 3) {
      Path dst = new Path(dest);
      if (!fs.isDirectory(dst)) {
-        throw new IOException( "When moving multiple files, " 
-                               + "destination " + dest + " should be a directory." );
+        throw new IOException("When moving multiple files, " 
+                              + "destination " + dest + " should be a directory.");
      }
    }
    //
@@ -493,13 +493,13 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
   */
  public void copy(String srcf, String dstf, Configuration conf) throws IOException {
-    Path [] srcs = fs.globPaths( new Path(srcf) );
+    Path [] srcs = fs.globPaths(new Path(srcf));
    Path dst = new Path(dstf);
-    if( srcs.length > 1 && !fs.isDirectory(dst)) {
-      throw new IOException( "When copying multiple files, " 
-                             + "destination should be a directory." );
+    if (srcs.length > 1 && !fs.isDirectory(dst)) {
+      throw new IOException("When copying multiple files, " 
+                            + "destination should be a directory.");
    }
-    for( int i=0; i<srcs.length; i++ ) {
+    for(int i=0; i<srcs.length; i++) {
      FileUtil.copy(fs, srcs[i], fs, dst, false, conf);
    }
  }
@@ -524,8 +524,8 @@ public class FsShell extends ToolBase {
    if (argv.length > 3) {
      Path dst = new Path(dest);
      if (!fs.isDirectory(dst)) {
-        throw new IOException( "When copying multiple files, " 
-                               + "destination " + dest + " should be a directory." );
+        throw new IOException("When copying multiple files, " 
+                              + "destination " + dest + " should be a directory.");
      }
    }
    //
@@ -572,14 +572,14 @@ public class FsShell extends ToolBase {
   * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
   */
  public void delete(String srcf, boolean recursive) throws IOException {
-    Path [] srcs = fs.globPaths( new Path(srcf) );
-    for( int i=0; i<srcs.length; i++ ) {
+    Path [] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
      delete(srcs[i], recursive);
    }
  }
    
  /* delete a file */
-  private void delete(Path src, boolean recursive ) throws IOException {
+  private void delete(Path src, boolean recursive) throws IOException {
    if (fs.isDirectory(src) && !recursive) {
      throw new IOException("Cannot remove directory \"" + src +
                            "\", use -rmr instead");
@@ -613,7 +613,7 @@ public class FsShell extends ToolBase {
    } else if (len < 1024 * 1024 * 1024) {
      val = (1.0 * len) / (1024 * 1024);
      ending = " MB";
-    } else if (len < 128L * 1024 * 1024 * 1024 ) {
+    } else if (len < 128L * 1024 * 1024 * 1024) {
      val = (1.0 * len) / (1024 * 1024 * 1024);
      ending = " GB";
    } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
@@ -915,7 +915,7 @@ public class FsShell extends ToolBase {
       System.err.println("           [-fs <local | file system URI>]");
       System.err.println("           [-fs <local | file system URI>]");
       System.err.println("           [-conf <configuration file>]");
       System.err.println("           [-conf <configuration file>]");
       System.err.println("           [-D <[property=value>]");
       System.err.println("           [-D <[property=value>]");
-      System.err.println("           [-ls <path>]" );
+      System.err.println("           [-ls <path>]");
       System.err.println("           [-lsr <path>]");
       System.err.println("           [-lsr <path>]");
       System.err.println("           [-du <path>]");
       System.err.println("           [-du <path>]");
       System.err.println("           [-dus <path>]");
       System.err.println("           [-dus <path>]");
@@ -941,7 +941,7 @@ public class FsShell extends ToolBase {
  /**
   * run
   */
-  public int run( String argv[] ) throws Exception {
+  public int run(String argv[]) throws Exception {

    if (argv.length < 1) {
      printUsage(""); 
@@ -1001,7 +1001,7 @@ public class FsShell extends ToolBase {
       } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd)) {
       } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd)) {
         copyToLocal(argv, i);
         copyToLocal(argv, i);
       } else if ("-getmerge".equals(cmd)) {
       } else if ("-getmerge".equals(cmd)) {
-        if(argv.length>i+2)
+        if (argv.length>i+2)
           copyMergeToLocal(argv[i++], new Path(argv[i++]), Boolean.parseBoolean(argv[i++]));
           copyMergeToLocal(argv[i++], new Path(argv[i++]), Boolean.parseBoolean(argv[i++]));
         else
         else
           copyMergeToLocal(argv[i++], new Path(argv[i++]));
           copyMergeToLocal(argv[i++], new Path(argv[i++]));
@@ -1039,7 +1039,7 @@ public class FsShell extends ToolBase {
        } else {
          du("");
        }
-      } else if( "-dus".equals(cmd)) {
+      } else if ("-dus".equals(cmd)) {
        if (i < argv.length) {
          exitCode = doall(cmd, argv, conf, i);
        } else {
@@ -1072,7 +1072,7 @@ public class FsShell extends ToolBase {
        System.err.println(cmd.substring(1) + ": " + 
                           ex.getLocalizedMessage());  
      }
-    } catch (IOException e ) {
+    } catch (IOException e) {
      //
      // IO exception encountered locally.
      // 

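For the @@ -613,7 hunk above: that ladder is FsShell's human-readable size formatting, and the 128L long literal keeps the GB threshold from overflowing int arithmetic. A simplified standalone re-statement (class and method names are ours, and the original ladder continues past GB into larger units):

/** Simplified re-statement of FsShell's size-formatting ladder. */
public class ByteDescExample {
  static String byteDesc(long len) {
    double val;
    String ending;
    if (len < 1024 * 1024) {
      val = (1.0 * len) / 1024;
      ending = " KB";
    } else if (len < 1024 * 1024 * 1024) {
      val = (1.0 * len) / (1024 * 1024);
      ending = " MB";
    } else if (len < 128L * 1024 * 1024 * 1024) {   // long literal avoids int overflow
      val = (1.0 * len) / (1024 * 1024 * 1024);
      ending = " GB";
    } else {
      val = (1.0 * len) / (1024L * 1024 * 1024 * 1024);
      ending = " TB";
    }
    return String.format("%.1f%s", val, ending);
  }

  public static void main(String[] args) {
    System.out.println(byteDesc(3L * 1024 * 1024 * 1024));  // prints 3.0 GB
  }
}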
+ 2 - 2
src/java/org/apache/hadoop/fs/InMemoryFileSystem.java

@@ -89,7 +89,7 @@ public class InMemoryFileSystem extends ChecksumFileSystem {
     */
    public String[][] getFileCacheHints(Path f, long start, long len)
      throws IOException {
-      if (! exists(f)) {
+      if (!exists(f)) {
        return null;
      } else {
        return new String[][] {{"inmemory"}};
@@ -194,7 +194,7 @@ public class InMemoryFileSystem extends ChecksumFileSystem {
                                     short replication, long blockSize, Progressable progress)
      throws IOException {
      synchronized (this) {
-        if (exists(f) && ! overwrite) {
+        if (exists(f) && !overwrite) {
          throw new IOException("File already exists:"+f);
        }
        FileAttributes fAttr =(FileAttributes) tempFileAttribs.remove(getPath(f));

+ 4 - 4
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -34,7 +34,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
    super(new RawLocalFileSystem());
  }
    
-  public LocalFileSystem( FileSystem rawLocalFileSystem ) {
+  public LocalFileSystem(FileSystem rawLocalFileSystem) {
    super(rawLocalFileSystem);
  }
    
@@ -44,13 +44,13 @@ public class LocalFileSystem extends ChecksumFileSystem {
  }

  @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException {
    FileUtil.copy(this, src, this, dst, delSrc, getConf());
  }

  @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException {
    FileUtil.copy(this, src, this, dst, delSrc, getConf());
  }
@@ -88,7 +88,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
        }
      }
      String suffix = "." + new Random().nextInt();
-      File badFile = new File(badDir,f.getName()+suffix);
+      File badFile = new File(badDir, f.getName()+suffix);
      LOG.warn("Moving bad file " + f + " to " + badFile);
      in.close();                               // close it first
      f.renameTo(badFile);                      // rename it

+ 2 - 2
src/java/org/apache/hadoop/fs/Path.java

@@ -176,7 +176,7 @@ public class Path implements Comparable {
  public Path getParent() {
    String path = uri.getPath();
    int lastSlash = path.lastIndexOf('/');
-    int start = hasWindowsDrive(path,true) ? 3 : 0;
+    int start = hasWindowsDrive(path, true) ? 3 : 0;
    if ((path.length() == start) ||               // empty path
        (lastSlash == start && path.length() == start+1)) { // at root
      return null;
@@ -186,7 +186,7 @@ public class Path implements Comparable {
       parent = "";
       parent = "";
     } else {
     } else {
       int end = hasWindowsDrive(path, true) ? 3 : 0;
       int end = hasWindowsDrive(path, true) ? 3 : 0;
-      parent = path.substring(0,lastSlash==end?end+1:lastSlash);
+      parent = path.substring(0, lastSlash==end?end+1:lastSlash);
     }
     }
     return new Path(uri.getScheme(), uri.getAuthority(), parent);
     return new Path(uri.getScheme(), uri.getAuthority(), parent);
   }
   }

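getParent() above offsets all slash arithmetic by 3 when the path carries a slashed Windows drive spec, so "/c:/" plays the role of "/". A standalone sketch of the same arithmetic (the drive test here is a simplified stand-in for hasWindowsDrive, and the class name is ours):

/** Standalone sketch of the parent-path arithmetic in Path.getParent(). */
public class ParentPathExample {
  static boolean hasWindowsDrive(String path) {
    // slashed drive spec, e.g. "/c:/...": slash, letter, colon (simplified)
    return path.length() >= 3 && path.charAt(0) == '/'
        && Character.isLetter(path.charAt(1)) && path.charAt(2) == ':';
  }

  static String parent(String path) {
    int lastSlash = path.lastIndexOf('/');
    int start = hasWindowsDrive(path) ? 3 : 0;
    if (path.length() == start                                      // empty path
        || (lastSlash == start && path.length() == start + 1)) {    // at root
      return null;
    }
    int end = hasWindowsDrive(path) ? 3 : 0;
    return path.substring(0, lastSlash == end ? end + 1 : lastSlash);
  }

  public static void main(String[] args) {
    System.out.println(parent("/a/b/c"));   // /a/b
    System.out.println(parent("/a"));       // /
    System.out.println(parent("/"));        // null
    System.out.println(parent("/c:/a"));    // /c:/
  }
}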
+ 11 - 11
src/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -60,7 +60,7 @@ public class RawLocalFileSystem extends FileSystem {
   * Return null if otherwise.
   */
  public String[][] getFileCacheHints(Path f, long start, long len) throws IOException {
-    if (! exists(f)) {
+    if (!exists(f)) {
      return null;
    } else {
      String result[][] = new String[1][];
@@ -183,7 +183,7 @@ public class RawLocalFileSystem extends FileSystem {
  public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
                                   short replication, long blockSize, Progressable progress)
    throws IOException {
-    if (exists(f) && ! overwrite) {
+    if (exists(f) && !overwrite) {
      throw new IOException("File already exists:"+f);
    }
    Path parent = f.getParent();
@@ -201,9 +201,9 @@ public class RawLocalFileSystem extends FileSystem {
  }
  
  /** Set the replication of the given file */
-  public boolean setReplication( Path src,
-                                 short replication
-                                 ) throws IOException {
+  public boolean setReplication(Path src,
+                                short replication
+                                ) throws IOException {
    return true;
  }
  
@@ -236,9 +236,9 @@ public class RawLocalFileSystem extends FileSystem {
    File localf = pathToFile(f);
    Path[] results;
    
-    if(!localf.exists())
+    if (!localf.exists())
      return null;
-    else if(localf.isFile()) {
+    else if (localf.isFile()) {
      results = new Path[1];
      results[0] = f;
      return results;
@@ -270,12 +270,12 @@ public class RawLocalFileSystem extends FileSystem {
   * Set the working directory to the given directory.
   */
  @Override
-    public void setWorkingDirectory(Path newDir) {
+  public void setWorkingDirectory(Path newDir) {
    workingDir = newDir;
  }
  
  @Override
-    public Path getWorkingDirectory() {
+  public Path getWorkingDirectory() {
    return workingDir;
  }
  
@@ -337,13 +337,13 @@ public class RawLocalFileSystem extends FileSystem {
  }
  
  @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException {
    FileUtil.copy(this, src, this, dst, delSrc, getConf());
  }
  
  @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
    throws IOException {
    FileUtil.copy(this, src, this, dst, delSrc, getConf());
  }

+ 21 - 21
src/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -64,21 +64,21 @@ public class S3FileSystem extends FileSystem {
    FileSystemStore store = new Jets3tFileSystemStore();
    
    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        conf.getInt("fs.s3.maxRetries", 4),
-        conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
+                                                                               conf.getInt("fs.s3.maxRetries", 4),
+                                                                               conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
    Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    exceptionToPolicyMap.put(S3Exception.class, basePolicy);
    
    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+                                                              RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
    methodNameToPolicyMap.put("storeBlock", methodPolicy);
    methodNameToPolicyMap.put("retrieveBlock", methodPolicy);
    
    return (FileSystemStore) RetryProxy.create(FileSystemStore.class,
-        store, methodNameToPolicyMap);
+                                               store, methodNameToPolicyMap);
  }
  
  @Override
@@ -116,7 +116,7 @@ public class S3FileSystem extends FileSystem {
      store.storeINode(absolutePath, INode.DIRECTORY_INODE);
    } else if (inode.isFile()) {
      throw new IOException(String.format(
-          "Can't make directory for path %s since it is a file.", absolutePath));
+                                          "Can't make directory for path %s since it is a file.", absolutePath));
    }
    Path parent = absolutePath.getParent();
    return (parent == null || mkdirs(parent));
@@ -167,8 +167,8 @@ public class S3FileSystem extends FileSystem {
  @Override
  public FSDataOutputStream create(Path file, boolean overwrite, int bufferSize,
-      short replication, long blockSize, Progressable progress)
-      throws IOException {
+                                   short replication, long blockSize, Progressable progress)
+    throws IOException {

    INode inode = store.retrieveINode(makeAbsolute(file));
    if (inode != null) {
@@ -185,16 +185,16 @@ public class S3FileSystem extends FileSystem {
        }
      }      
    }
-    return new FSDataOutputStream( 
-            new S3OutputStream(getConf(), store, makeAbsolute(file),
-                blockSize, progress), bufferSize );
+    return new FSDataOutputStream(
+                                  new S3OutputStream(getConf(), store, makeAbsolute(file),
+                                                     blockSize, progress), bufferSize);
  }

  @Override
  public FSDataInputStream open(Path path, int bufferSize) throws IOException {
    INode inode = checkFile(path);
-    return new FSDataInputStream( new S3InputStream(getConf(), store, inode),
-            bufferSize);
+    return new FSDataInputStream(new S3InputStream(getConf(), store, inode),
+                                 bufferSize);
  }

  @Override
@@ -262,7 +262,7 @@ public class S3FileSystem extends FileSystem {
        return false;
      }
      for (Path p : contents) {
-        if (! delete(p)) {
+        if (!delete(p)) {
          return false;
        }
      }
@@ -301,7 +301,7 @@ public class S3FileSystem extends FileSystem {
   */
  @Override
  public boolean setReplication(Path path, short replication)
-      throws IOException {
+    throws IOException {
    return true;
  }

@@ -328,7 +328,7 @@ public class S3FileSystem extends FileSystem {
   */
  @Override
  public String[][] getFileCacheHints(Path f, long start, long len)
-      throws IOException {
+    throws IOException {
    // TODO: Check this is the correct behavior
    if (!exists(f)) {
      return null;
@@ -337,14 +337,14 @@ public class S3FileSystem extends FileSystem {
  }

  /** @deprecated */ @Deprecated
-  @Override
-  public void lock(Path path, boolean shared) throws IOException {
+    @Override
+    public void lock(Path path, boolean shared) throws IOException {
    // TODO: Design and implement
  }

  /** @deprecated */ @Deprecated
-  @Override
-  public void release(Path path) throws IOException {
+    @Override
+    public void release(Path path) throws IOException {
    // TODO: Design and implement
  }

@@ -360,13 +360,13 @@ public class S3FileSystem extends FileSystem {
  @Override
  public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    throws IOException {
    return tmpLocalFile;
  }

  @Override
  public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    throws IOException {
    moveFromLocalFile(tmpLocalFile, fsOutputFile);
  }


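The createDefaultStore() block reformatted above wraps the S3 store in a retry proxy: a base policy (bounded attempts with a fixed sleep) is mapped per exception type, then per method name. The same wiring applied to a hypothetical interface of our own, using only the retry calls visible in this patch:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryExample {
  /** Hypothetical interface whose methods may fail transiently. */
  public interface BlockStore {
    void storeBlock(byte[] data) throws IOException;
  }

  static BlockStore withRetries(BlockStore store) {
    // Up to 4 attempts, 10 seconds apart, for IOException only.
    RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
        4, 10, TimeUnit.SECONDS);
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
    exceptionToPolicyMap.put(IOException.class, basePolicy);
    RetryPolicy methodPolicy = RetryPolicies.retryByException(
        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap =
      new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("storeBlock", methodPolicy);  // retry this method only
    return (BlockStore) RetryProxy.create(BlockStore.class, store,
                                          methodNameToPolicyMap);
  }
}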
+ 46 - 46
src/java/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -37,65 +37,65 @@ class S3InputStream extends FSInputStream {
  }

  @Override
-    public synchronized long getPos() throws IOException {
-      return pos;
-    }
+  public synchronized long getPos() throws IOException {
+    return pos;
+  }

  @Override
-    public synchronized int available() throws IOException {
-      return (int) (fileLength - pos);
-    }
+  public synchronized int available() throws IOException {
+    return (int) (fileLength - pos);
+  }

  @Override
-    public synchronized void seek(long targetPos) throws IOException {
-      if (targetPos > fileLength) {
-        throw new IOException("Cannot seek after EOF");
-      }
-      pos = targetPos;
-      blockEnd = -1;
+  public synchronized void seek(long targetPos) throws IOException {
+    if (targetPos > fileLength) {
+      throw new IOException("Cannot seek after EOF");
    }
+    pos = targetPos;
+    blockEnd = -1;
+  }

  @Override
-    public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-      return false;
-    }
+  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+    return false;
+  }

  @Override
-    public synchronized int read() throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
+  public synchronized int read() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+    int result = -1;
+    if (pos < fileLength) {
+      if (pos > blockEnd) {
+        blockSeekTo(pos);
      }
-      int result = -1;
-      if (pos < fileLength) {
-        if (pos > blockEnd) {
-          blockSeekTo(pos);
-        }
-        result = blockStream.read();
-        if (result >= 0) {
-          pos++;
-        }
+      result = blockStream.read();
+      if (result >= 0) {
+        pos++;
      }
-      return result;
    }
+    return result;
+  }

  @Override
-    public synchronized int read(byte buf[], int off, int len) throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
+  public synchronized int read(byte buf[], int off, int len) throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+    if (pos < fileLength) {
+      if (pos > blockEnd) {
+        blockSeekTo(pos);
      }
-      if (pos < fileLength) {
-        if (pos > blockEnd) {
-          blockSeekTo(pos);
-        }
-        int realLen = Math.min(len, (int) (blockEnd - pos + 1));
-        int result = blockStream.read(buf, off, realLen);
-        if (result >= 0) {
-          pos += result;
-        }
-        return result;
+      int realLen = Math.min(len, (int) (blockEnd - pos + 1));
+      int result = blockStream.read(buf, off, realLen);
+      if (result >= 0) {
+        pos += result;
      }
+      return result;
    }
+    return -1;
+  }

  private synchronized void blockSeekTo(long target) throws IOException {
    //
@@ -132,7 +132,7 @@ class S3InputStream extends FSInputStream {
  }

  @Override
-    public void close() throws IOException {
+  public void close() throws IOException {
    if (closed) {
      throw new IOException("Stream closed");
    }
@@ -151,17 +151,17 @@ class S3InputStream extends FSInputStream {
   * We don't support marks.
   */
  @Override
-    public boolean markSupported() {
+  public boolean markSupported() {
    return false;
  }

  @Override
-    public void mark(int readLimit) {
+  public void mark(int readLimit) {
    // Do nothing
  }

  @Override
-    public void reset() throws IOException {
+  public void reset() throws IOException {
    throw new IOException("Mark not supported");
  }


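Both read() bodies re-indented above share one invariant: pos is the next file offset to serve, blockEnd is the last offset of the currently-fetched block, and pos > blockEnd triggers blockSeekTo. A stripped-down in-memory model of that bookkeeping (all names ours; the real stream fetches blocks from the S3 store):

/** Stripped-down, in-memory model of S3InputStream's pos/blockEnd bookkeeping. */
public class BlockReadExample {
  private final byte[][] blocks;     // stand-in for blocks fetched from the store
  private final long fileLength;
  private long pos = 0;              // next file offset to serve
  private long blockEnd = -1;        // last offset covered by the current block
  private byte[] current;
  private long currentStart;

  public BlockReadExample(byte[][] blocks, long fileLength) {
    this.blocks = blocks;
    this.fileLength = fileLength;
  }

  private void blockSeekTo(long target) {
    long start = 0;
    for (byte[] b : blocks) {        // find the block containing 'target'
      if (target < start + b.length) {
        current = b;
        currentStart = start;
        blockEnd = start + b.length - 1;
        return;
      }
      start += b.length;
    }
  }

  public synchronized int read() {
    int result = -1;
    if (pos < fileLength) {
      if (pos > blockEnd) {          // current block exhausted (or none yet)
        blockSeekTo(pos);
      }
      result = current[(int) (pos - currentStart)] & 0xff;
      pos++;
    }
    return result;                   // -1 signals EOF
  }
}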
+ 49 - 49
src/java/org/apache/hadoop/fs/s3/S3OutputStream.java

@@ -74,52 +74,52 @@ class S3OutputStream extends OutputStream {
  }

  @Override
-    public synchronized void write(int b) throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
+  public synchronized void write(int b) throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }

-      if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
-        flush();
-      }
-      outBuf[pos++] = (byte) b;
-      filePos++;
+    if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
+      flush();
    }
+    outBuf[pos++] = (byte) b;
+    filePos++;
+  }

  @Override
-    public synchronized void write(byte b[], int off, int len) throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
-      while (len > 0) {
-        int remaining = bufferSize - pos;
-        int toWrite = Math.min(remaining, len);
-        System.arraycopy(b, off, outBuf, pos, toWrite);
-        pos += toWrite;
-        off += toWrite;
-        len -= toWrite;
-        filePos += toWrite;
-
-        if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
-          flush();
-        }
+  public synchronized void write(byte b[], int off, int len) throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+    while (len > 0) {
+      int remaining = bufferSize - pos;
+      int toWrite = Math.min(remaining, len);
+      System.arraycopy(b, off, outBuf, pos, toWrite);
+      pos += toWrite;
+      off += toWrite;
+      len -= toWrite;
+      filePos += toWrite;
+
+      if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
+        flush();
      }
    }
+  }

  @Override
-    public synchronized void flush() throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
+  public synchronized void flush() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }

-      if (bytesWrittenToBlock + pos >= blockSize) {
-        flushData((int) blockSize - bytesWrittenToBlock);
-      }
-      if (bytesWrittenToBlock == blockSize) {
-        endBlock();
-      }
-      flushData(pos);
+    if (bytesWrittenToBlock + pos >= blockSize) {
+      flushData((int) blockSize - bytesWrittenToBlock);
+    }
+    if (bytesWrittenToBlock == blockSize) {
+      endBlock();
    }
+    flushData(pos);
+  }

  private synchronized void flushData(int maxPos) throws IOException {
    int workingPos = Math.min(pos, maxPos);
@@ -179,22 +179,22 @@ class S3OutputStream extends OutputStream {
   }
   }
 
 
   @Override
   @Override
-    public synchronized void close() throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
+  public synchronized void close() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
 
 
-      flush();
-      if (filePos == 0 || bytesWrittenToBlock != 0) {
-        endBlock();
-      }
+    flush();
+    if (filePos == 0 || bytesWrittenToBlock != 0) {
+      endBlock();
+    }
 
 
-      backupStream.close();
-      backupFile.delete();
+    backupStream.close();
+    backupFile.delete();
 
 
-      super.close();
+    super.close();
 
 
-      closed = true;
-    }
+    closed = true;
+  }
 
 
 }
 }

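The write/flush pair above accumulates bytes in outBuf and flushes whenever the buffer fills or the running block reaches blockSize. A simplified self-contained sketch of that discipline; the in-memory backing stream stands in for the real flushData()/endBlock(), so treat it as illustrative only:

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

class BlockBufferedOutputStream extends OutputStream {
  private final long blockSize;
  private final byte[] outBuf;
  private int pos;                    // bytes buffered, not yet flushed
  private long bytesWrittenToBlock;   // bytes already flushed into the block
  private final ByteArrayOutputStream backing = new ByteArrayOutputStream();

  BlockBufferedOutputStream(long blockSize, int bufferSize) {
    this.blockSize = blockSize;
    this.outBuf = new byte[bufferSize];
  }

  @Override
  public synchronized void write(int b) throws IOException {
    if ((bytesWrittenToBlock + pos == blockSize) || (pos >= outBuf.length)) {
      flush();                        // block or buffer boundary reached
    }
    outBuf[pos++] = (byte) b;
  }

  @Override
  public synchronized void flush() throws IOException {
    backing.write(outBuf, 0, pos);    // stand-in for flushData(pos)
    bytesWrittenToBlock += pos;
    pos = 0;
    if (bytesWrittenToBlock >= blockSize) {
      bytesWrittenToBlock = 0;        // stand-in for endBlock()
    }
  }
}
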
+ 1 - 1
src/java/org/apache/hadoop/io/BytesWritable.java

@@ -197,7 +197,7 @@ public class BytesWritable implements WritableComparable {
                       byte[] b2, int s2, int l2) {
      int size1 = readInt(b1, s1);
      int size2 = readInt(b2, s2);
-      return compareBytes(b1,s1+4, size1, b2, s2+4, size2);
+      return compareBytes(b1, s1+4, size1, b2, s2+4, size2);
    }
  }


+ 1 - 1
src/java/org/apache/hadoop/io/GenericWritable.java

@@ -70,7 +70,7 @@ public abstract class GenericWritable implements Writable {
      }
    }
    throw new RuntimeException("The type of instance is: "
-                + instance.getClass() + ", which is NOT registered.");
+                               + instance.getClass() + ", which is NOT registered.");
  }

  /**

+ 17 - 17
src/java/org/apache/hadoop/io/MapFile.java

@@ -81,7 +81,7 @@ public class MapFile {
                  Class keyClass, Class valClass,
                  CompressionType compress, Progressable progress)
      throws IOException {
-      this(conf,fs,dirName,WritableComparator.get(keyClass),valClass,
+      this(conf, fs, dirName, WritableComparator.get(keyClass), valClass,
           compress, progress);
    }

@@ -89,7 +89,7 @@ public class MapFile {
    public Writer(Configuration conf, FileSystem fs, String dirName,
                  Class keyClass, Class valClass, CompressionType compress)
      throws IOException {
-      this(conf,fs,dirName,WritableComparator.get(keyClass),valClass,compress);
+      this(conf, fs, dirName, WritableComparator.get(keyClass), valClass, compress);
    }

    /** Create the named map using the named key comparator. */
@@ -101,8 +101,8 @@ public class MapFile {
    }
    /** Create the named map using the named key comparator. */
    public Writer(Configuration conf, FileSystem fs, String dirName,
-                 WritableComparator comparator, Class valClass,
-                 SequenceFile.CompressionType compress)
+                  WritableComparator comparator, Class valClass,
+                  SequenceFile.CompressionType compress)
      throws IOException {
      this(conf, fs, dirName, comparator, valClass, compress, null);
    }
@@ -118,7 +118,7 @@ public class MapFile {

      Path dir = new Path(dirName);
      if (!fs.mkdirs(dir)) {
-          throw new IOException("Mkdirs failed to create directory " + dir.toString());
+        throw new IOException("Mkdirs failed to create directory " + dir.toString());
      }
      Path dataFile = new Path(dir, DATA_FILE_NAME);
      Path indexFile = new Path(dir, INDEX_FILE_NAME);
@@ -126,7 +126,7 @@ public class MapFile {
      Class keyClass = comparator.getKeyClass();
      this.data =
        SequenceFile.createWriter
-        (fs,conf,dataFile,keyClass,valClass,compress,progress);
+        (fs, conf, dataFile, keyClass, valClass, compress, progress);
      this.index =
        SequenceFile.createWriter
        (fs, conf, indexFile, keyClass, LongWritable.class,
@@ -182,8 +182,8 @@ public class MapFile {
  public static class Reader {

    /** Number of index entries to skip between each entry.  Zero by default.
-    * Setting this to values larger than zero can facilitate opening large map
-    * files using less memory. */
+     * Setting this to values larger than zero can facilitate opening large map
+     * files using less memory. */
    private int INDEX_SKIP = 0;

    private WritableComparator comparator;
@@ -286,7 +286,7 @@ public class MapFile {
        }
      } catch (EOFException e) {
        SequenceFile.LOG.warn("Unexpected EOF reading " + index +
-                                 " at entry #" + count + ".  Ignoring.");
+                              " at entry #" + count + ".  Ignoring.");
      } finally {
	indexClosed = true;
        index.close();
@@ -306,7 +306,7 @@ public class MapFile {

      readIndex();
      int pos = ((count - 1) / 2);              // middle of the index
-      if(pos < 0) {
+      if (pos < 0) {
        throw new IOException("MapFile empty");
      }

@@ -357,7 +357,7 @@ public class MapFile {

      if (seekIndex != -1                         // seeked before
          && seekIndex+1 < count           
-          && comparator.compare(key,keys[seekIndex+1])<0 // before next indexed
+          && comparator.compare(key, keys[seekIndex+1])<0 // before next indexed
          && comparator.compare(key, nextKey)
          >= 0) {                                 // but after last seeked
        // do nothing
@@ -431,9 +431,9 @@ public class MapFile {
     * @return          - returns the key that was the closest match or null if eof.
     */
    public synchronized WritableComparable getClosest(WritableComparable key, Writable val)
-        throws IOException {
+      throws IOException {

-      if(seekInternal(key) > 0) {
+      if (seekInternal(key) > 0) {
        return null;
      }
      data.getCurrentValue(val);
@@ -442,7 +442,7 @@ public class MapFile {

    /** Close the map. */
    public synchronized void close() throws IOException {
-      if (! indexClosed) {
+      if (!indexClosed) {
	index.close();
      }
      data.close();
@@ -482,7 +482,7 @@ public class MapFile {
   * @throws Exception
   */
  public static long fix(FileSystem fs, Path dir,
-          Class keyClass, Class valueClass, boolean dryrun, Configuration conf) throws Exception {
+                         Class keyClass, Class valueClass, boolean dryrun, Configuration conf) throws Exception {
    String dr = (dryrun ? "[DRY RUN ] " : "");
    Path data = new Path(dir, DATA_FILE_NAME);
    Path index = new Path(dir, INDEX_FILE_NAME);
@@ -498,11 +498,11 @@ public class MapFile {
    SequenceFile.Reader dataReader = new SequenceFile.Reader(fs, data, conf);
    if (!dataReader.getKeyClass().equals(keyClass)) {
      throw new Exception(dr + "Wrong key class in " + dir + ", expected" + keyClass.getName() +
-              ", got " + dataReader.getKeyClass().getName());
+                          ", got " + dataReader.getKeyClass().getName());
    }
    if (!dataReader.getValueClass().equals(valueClass)) {
      throw new Exception(dr + "Wrong value class in " + dir + ", expected" + valueClass.getName() +
-              ", got " + dataReader.getValueClass().getName());
+                          ", got " + dataReader.getValueClass().getName());
    }
    long cnt = 0L;
    Writable key = (Writable)ReflectionUtils.newInstance(keyClass, conf);

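For context on the class being reindented: a MapFile is a directory holding a sorted "data" SequenceFile plus an "index" file of every INDEX_SKIP-th key, so a lookup binary-searches the in-memory index and then scans forward in the data file. A hedged usage sketch against this era's API (the path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.MapFile;
import org.apache.hadoop.io.Text;

public class MapFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    String dir = "example.map";                             // hypothetical path

    MapFile.Writer writer =
      new MapFile.Writer(conf, fs, dir, Text.class, IntWritable.class);
    writer.append(new Text("apple"), new IntWritable(1));   // keys must arrive
    writer.append(new Text("banana"), new IntWritable(2));  // in sorted order
    writer.close();

    MapFile.Reader reader = new MapFile.Reader(fs, dir, conf);
    IntWritable val = new IntWritable();
    reader.get(new Text("banana"), val);                    // index seek + data scan
    System.out.println(val);                                // prints 2
    reader.close();
  }
}
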
+ 5 - 5
src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -147,8 +147,8 @@ public class ObjectWritable implements Writable, Configurable {
      } else {
        throw new IllegalArgumentException("Not a primitive: "+declaredClass);
      }
-    } else if (declaredClass.isEnum() ) {         // enum
-      UTF8.writeString( out, ((Enum)instance).name() );
+    } else if (declaredClass.isEnum()) {         // enum
+      UTF8.writeString(out, ((Enum)instance).name());
    } else if (Writable.class.isAssignableFrom(declaredClass)) { // Writable
      UTF8.writeString(out, instance.getClass().getName());
      ((Writable)instance).write(out);
@@ -169,7 +169,7 @@ public class ObjectWritable implements Writable, Configurable {
  /** Read a {@link Writable}, {@link String}, primitive type, or an array of
   * the preceding. */
  @SuppressWarnings("unchecked")
-    public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
+  public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
    throws IOException {
    String className = UTF8.readString(in);
    Class<?> declaredClass = PRIMITIVE_NAMES.get(className);
@@ -216,8 +216,8 @@ public class ObjectWritable implements Writable, Configurable {

    } else if (declaredClass == String.class) {        // String
      instance = UTF8.readString(in);
-    } else if( declaredClass.isEnum() ) {         // enum
-      instance = Enum.valueOf( (Class<? extends Enum>) declaredClass, UTF8.readString(in) );
+    } else if (declaredClass.isEnum()) {         // enum
+      instance = Enum.valueOf((Class<? extends Enum>) declaredClass, UTF8.readString(in));
    } else {                                      // Writable
      Class instanceClass = null;
      try {

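The enum branches above serialize a constant by writing its name and rebuild it with Enum.valueOf. The same round trip using plain java.io types instead of Hadoop's UTF8 helper:

import java.io.*;

class EnumIo {
  enum Color { RED, GREEN, BLUE }

  static void writeEnum(DataOutput out, Enum<?> e) throws IOException {
    out.writeUTF(e.name());                     // serialize by name
  }

  static <T extends Enum<T>> T readEnum(DataInput in, Class<T> cls)
      throws IOException {
    return Enum.valueOf(cls, in.readUTF());     // reconstruct from name
  }

  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    writeEnum(new DataOutputStream(bytes), Color.GREEN);
    DataInput in =
      new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
    System.out.println(readEnum(in, Color.class));  // GREEN
  }
}
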
+ 4 - 4
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -109,7 +109,7 @@ public class SequenceFile {
    createWriter(FileSystem fs, Configuration conf, Path name, 
                 Class keyClass, Class valClass) 
    throws IOException {
-    return createWriter(fs,conf,name,keyClass,valClass,
+    return createWriter(fs, conf, name, keyClass, valClass,
                        getCompressionType(conf));
  }

@@ -679,7 +679,7 @@ public class SequenceFile {
      out.writeBoolean(this.isCompressed());
      out.writeBoolean(this.isBlockCompressed());

-      if(this.isCompressed()) {
+      if (this.isCompressed()) {
        Text.writeString(out, (codec.getClass()).getName());
      }
      this.metadata.write(out);
@@ -698,7 +698,7 @@ public class SequenceFile {
      this.compress = compress;
      this.codec = codec;
      this.metadata = metadata;
-      if(this.codec != null) {
+      if (this.codec != null) {
        ReflectionUtils.setConf(this.codec, this.conf);
        this.deflateFilter = this.codec.createOutputStream(buffer);
        this.deflateOut = 
@@ -2351,7 +2351,7 @@ public class SequenceFile {
        rawKey.reset();
        rawKey.write(ms.getKey().getData(), 0, ms.getKey().getLength());
        //load the raw value. Re-use the existing rawValue buffer
-        if(rawValue == null)
+        if (rawValue == null)
          rawValue = ms.in.createValueBytes();
        int valLength = ms.nextRawValue(rawValue);


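The createWriter overload above simply forwards with the CompressionType taken from the configuration. A small usage sketch of the writer/reader pair (API as of this era; the path is hypothetical):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;

public class SeqFileExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);
    Path name = new Path("example.seq");        // hypothetical path

    SequenceFile.Writer w =
      SequenceFile.createWriter(fs, conf, name, Text.class, IntWritable.class);
    w.append(new Text("answer"), new IntWritable(42));
    w.close();

    SequenceFile.Reader r = new SequenceFile.Reader(fs, name, conf);
    Text k = new Text();
    IntWritable v = new IntWritable();
    while (r.next(k, v)) {                      // iterate key/value records
      System.out.println(k + "\t" + v);
    }
    r.close();
  }
}
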
+ 2 - 2
src/java/org/apache/hadoop/io/Text.java

@@ -202,7 +202,7 @@ public class Text implements WritableComparable {
   * increased to match. The existing contents of the buffer
   * (if any) are deleted.
   */
-  private void setCapacity( int len ) {
+  private void setCapacity(int len) {
    if (bytes == null || bytes.length < len)
      bytes = new byte[len];      
  }
@@ -246,7 +246,7 @@ public class Text implements WritableComparable {
  /** Compare two Texts bytewise using standard UTF8 ordering. */
  public int compareTo(Object o) {
    Text that = (Text)o;
-    if(this == that)
+    if (this == that)
      return 0;
    else
      return WritableComparator.compareBytes(bytes, 0, length,

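compareTo above short-circuits on reference equality and otherwise falls through to WritableComparator.compareBytes, which orders UTF-8 data bytewise with each byte treated as unsigned. A standalone equivalent of that comparison:

class CompareBytes {
  static int compareBytes(byte[] b1, int s1, int l1,
                          byte[] b2, int s2, int l2) {
    int n = Math.min(l1, l2);
    for (int i = 0; i < n; i++) {
      int a = b1[s1 + i] & 0xff;      // unsigned view of each byte
      int b = b2[s2 + i] & 0xff;
      if (a != b) {
        return a - b;                 // first differing byte decides
      }
    }
    return l1 - l2;                   // equal prefix: shorter sorts first
  }

  public static void main(String[] args) {
    byte[] x = "abc".getBytes();
    byte[] y = "abd".getBytes();
    System.out.println(compareBytes(x, 0, x.length, y, 0, y.length)); // negative
  }
}
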
+ 1 - 1
src/java/org/apache/hadoop/io/VersionedWritable.java

@@ -45,7 +45,7 @@ public abstract class VersionedWritable implements Writable {
  public void readFields(DataInput in) throws IOException {
    byte version = in.readByte();                 // read version
    if (version != getVersion())
-      throw new VersionMismatchException(getVersion(),version);
+      throw new VersionMismatchException(getVersion(), version);
  }


+ 16 - 16
src/java/org/apache/hadoop/io/WritableComparator.java

@@ -175,21 +175,21 @@ public class WritableComparator implements Comparator {
   * @return deserialized long
   */
  public static long readVLong(byte[] bytes, int start) throws IOException {
-      int len = bytes[start];
-      if (len >= -112) {
-          return len;
-      }
-      boolean isNegative = (len < -120);
-      len = isNegative ? -(len + 120) : -(len + 112);
-      if (start+1+len>bytes.length)
-          throw new IOException(
-                  "Not enough number of bytes for a zero-compressed integer");
-      long i = 0;
-      for (int idx = 0; idx < len; idx++) {
-          i = i << 8;
-          i = i | (bytes[start+1+idx] & 0xFF);
-      }
-      return (isNegative ? (i ^ -1L) : i);
+    int len = bytes[start];
+    if (len >= -112) {
+      return len;
+    }
+    boolean isNegative = (len < -120);
+    len = isNegative ? -(len + 120) : -(len + 112);
+    if (start+1+len>bytes.length)
+      throw new IOException(
+                            "Not enough number of bytes for a zero-compressed integer");
+    long i = 0;
+    for (int idx = 0; idx < len; idx++) {
+      i = i << 8;
+      i = i | (bytes[start+1+idx] & 0xFF);
+    }
+    return (isNegative ? (i ^ -1L) : i);
  }

  /**
@@ -200,6 +200,6 @@ public class WritableComparator implements Comparator {
   * @return deserialized integer
   */
  public static int readVInt(byte[] bytes, int start) throws IOException {
-      return (int) readVLong(bytes, start);
+    return (int) readVLong(bytes, start);
  }
}

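readVLong above decodes Hadoop's zero-compressed format: a first byte in -112..127 is the value itself; anything smaller encodes the sign and the count of big-endian payload bytes that follow, with negative values stored one's-complemented. A self-contained copy of the decoder with a worked example:

import java.io.IOException;

class VLong {
  static long readVLong(byte[] bytes, int start) throws IOException {
    int len = bytes[start];
    if (len >= -112) {
      return len;                                // single-byte value
    }
    boolean isNegative = (len < -120);
    len = isNegative ? -(len + 120) : -(len + 112);  // payload byte count
    if (start + 1 + len > bytes.length)
      throw new IOException("Not enough number of bytes for a zero-compressed integer");
    long i = 0;
    for (int idx = 0; idx < len; idx++) {
      i = (i << 8) | (bytes[start + 1 + idx] & 0xFF);  // big-endian payload
    }
    return isNegative ? (i ^ -1L) : i;           // undo one's complement
  }

  public static void main(String[] args) throws IOException {
    // 300 = 0x012C: marker byte -114 means "positive, two payload bytes".
    byte[] encoded = { (byte) -114, 0x01, 0x2C };
    System.out.println(readVLong(encoded, 0));   // 300
  }
}
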
+ 6 - 6
src/java/org/apache/hadoop/io/WritableUtils.java

@@ -37,8 +37,8 @@ public final class WritableUtils  {
    byte[] outbuf = new byte[length];
    ByteArrayOutputStream bos =  new ByteArrayOutputStream();
    int len;
-    while((len=gzi.read(outbuf,0,outbuf.length)) != -1){
-      bos.write(outbuf,0,len);
+    while((len=gzi.read(outbuf, 0, outbuf.length)) != -1){
+      bos.write(outbuf, 0, len);
    }
    byte[] decompressed =  bos.toByteArray();
    bos.close();
@@ -55,12 +55,12 @@ public final class WritableUtils  {
    if (bytes != null) {
      ByteArrayOutputStream bos =  new ByteArrayOutputStream();
      GZIPOutputStream gzout = new GZIPOutputStream(bos);
-      gzout.write(bytes,0,bytes.length);
+      gzout.write(bytes, 0, bytes.length);
      gzout.close();
      byte[] buffer = bos.toByteArray();
      int len = buffer.length;
      out.writeInt(len);
-      out.write(buffer,0,len);
+      out.write(buffer, 0, len);
      /* debug only! Once we have confidence, can lose this. */
      return ((bytes.length != 0) ? (100*buffer.length)/bytes.length : 0);
    } else {
@@ -94,7 +94,7 @@ public final class WritableUtils  {
      byte[] buffer = s.getBytes("UTF-8");
      int len = buffer.length;
      out.writeInt(len);
-      out.write(buffer,0,len);
+      out.write(buffer, 0, len);
    } else {
      out.writeInt(-1);
    }
@@ -183,7 +183,7 @@ public final class WritableUtils  {
   */
  public static void displayByteArray(byte[] record){
    int i;
-    for(i=0;i < record.length -1 ; i++){
+    for(i=0;i < record.length -1; i++){
      if (i % 16 == 0) { System.out.println(); }
      System.out.print(Integer.toHexString(record[i]  >> 4 & 0x0F));
      System.out.print(Integer.toHexString(record[i] & 0x0F));

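The helpers above gzip a byte array through GZIPOutputStream and inflate it back via GZIPInputStream into a ByteArrayOutputStream. A minimal round trip in the same style:

import java.io.*;
import java.util.zip.GZIPInputStream;
import java.util.zip.GZIPOutputStream;

class GzipRoundTrip {
  static byte[] compress(byte[] bytes) throws IOException {
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    GZIPOutputStream gzout = new GZIPOutputStream(bos);
    gzout.write(bytes, 0, bytes.length);
    gzout.close();                              // finishes the gzip stream
    return bos.toByteArray();
  }

  static byte[] decompress(byte[] gz) throws IOException {
    GZIPInputStream gzi = new GZIPInputStream(new ByteArrayInputStream(gz));
    ByteArrayOutputStream bos = new ByteArrayOutputStream();
    byte[] buf = new byte[4096];
    int len;
    while ((len = gzi.read(buf, 0, buf.length)) != -1) {
      bos.write(buf, 0, len);
    }
    return bos.toByteArray();
  }

  public static void main(String[] args) throws IOException {
    byte[] original = "hello hadoop".getBytes("UTF-8");
    System.out.println(new String(decompress(compress(original)), "UTF-8"));
  }
}
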
+ 2 - 2
src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java

@@ -44,7 +44,7 @@ class BlockCompressorStream extends CompressorStream {
   *                            algorithm with given bufferSize
   */
  public BlockCompressorStream(OutputStream out, Compressor compressor, 
-      int bufferSize, int compressionOverhead) {
+                               int bufferSize, int compressionOverhead) {
    super(out, compressor, bufferSize);
    MAX_INPUT_SIZE = bufferSize - compressionOverhead;
  }
@@ -70,7 +70,7 @@ class BlockCompressorStream extends CompressorStream {
    if (b == null) {
      throw new NullPointerException();
    } else if ((off < 0) || (off > b.length) || (len < 0) ||
-            ((off + len) > b.length)) {
+               ((off + len) > b.length)) {
      throw new IndexOutOfBoundsException();
    } else if (len == 0) {
      return;

+ 2 - 2
src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java

@@ -41,7 +41,7 @@ class BlockDecompressorStream extends DecompressorStream {
   * @param bufferSize size of buffer
   */
  public BlockDecompressorStream(InputStream in, Decompressor decompressor, 
-      int bufferSize) {
+                                 int bufferSize) {
    super(in, decompressor, bufferSize);
  }

@@ -123,7 +123,7 @@ class BlockDecompressorStream extends DecompressorStream {
    int b3 = in.read();
    int b4 = in.read();
    if ((b1 | b2 | b3 | b4) < 0)
-        throw new EOFException();
+      throw new EOFException();
    return ((b1 << 24) + (b2 << 16) + (b3 << 8) + (b4 << 0));
  }
}

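The EOF test above works because InputStream.read() returns -1 at end of stream, so OR-ing the four bytes is negative exactly when any single read failed. A standalone version of that big-endian int reader:

import java.io.*;

class RawReadInt {
  static int readBigEndianInt(InputStream in) throws IOException {
    int b1 = in.read();
    int b2 = in.read();
    int b3 = in.read();
    int b4 = in.read();
    if ((b1 | b2 | b3 | b4) < 0)        // any -1 drives the OR negative
      throw new EOFException();
    return (b1 << 24) + (b2 << 16) + (b3 << 8) + b4;
  }

  public static void main(String[] args) throws IOException {
    byte[] buf = { 0, 0, 1, 44 };       // 300 in big-endian
    System.out.println(readBigEndianInt(new ByteArrayInputStream(buf))); // 300
  }
}
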
+ 1 - 1
src/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -122,7 +122,7 @@ public class CompressionCodecFactory {
        buf.append(itr.next().getName());
      }
    }
-    conf.set("io.compression.codecs",buf.toString());   
+    conf.set("io.compression.codecs", buf.toString());   
  }

  /**

+ 19 - 19
src/java/org/apache/hadoop/io/compress/LzoCodec.java

@@ -43,11 +43,11 @@ public class LzoCodec implements Configurable, CompressionCodec {
  private Configuration conf;

  public void setConf(Configuration conf) {
-	  this.conf = conf;
+    this.conf = conf;
  }

  public Configuration getConf() {
-	  return conf;
+    return conf;
  }

  private static boolean nativeLzoLoaded = false;
@@ -55,7 +55,7 @@ public class LzoCodec implements Configurable, CompressionCodec {
  static {
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      nativeLzoLoaded = LzoCompressor.isNativeLzoLoaded() &&
-                          LzoDecompressor.isNativeLzoLoaded();
+        LzoDecompressor.isNativeLzoLoaded();

      if (nativeLzoLoaded) {
        LOG.info("Successfully loaded & initialized native-lzo library");
@@ -78,7 +78,7 @@ public class LzoCodec implements Configurable, CompressionCodec {
  }

  public CompressionOutputStream createOutputStream(OutputStream out) 
-  throws IOException {
+    throws IOException {
    // Ensure native-lzo library is loaded & initialized
    if (!isNativeLzoLoaded()) {
      throw new IOException("native-lzo library not available");
@@ -107,12 +107,12 @@ public class LzoCodec implements Configurable, CompressionCodec {
    // Create the lzo output-stream
    LzoCompressor.CompressionStrategy strategy = 
      LzoCompressor.CompressionStrategy.valueOf(
-              conf.get("io.compression.codec.lzo.compressor",
-                        LzoCompressor.CompressionStrategy.LZO1X_1.name()
-                      )
-                    ); 
+                                                conf.get("io.compression.codec.lzo.compressor",
+                                                         LzoCompressor.CompressionStrategy.LZO1X_1.name()
+                                                         )
+                                                ); 
    int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 
-                                  64*1024);
+                                 64*1024);
    int compressionOverhead = 0;
    if (strategy.name().contains("LZO1")) {
      compressionOverhead = (int)(((bufferSize - (64 + 3)) * 16.0) / 17.0);  
@@ -121,12 +121,12 @@ public class LzoCodec implements Configurable, CompressionCodec {
    }

    return new BlockCompressorStream(out, 
-            new LzoCompressor(strategy, bufferSize), 
-            bufferSize, compressionOverhead);
+                                     new LzoCompressor(strategy, bufferSize), 
+                                     bufferSize, compressionOverhead);
  }

  public CompressionInputStream createInputStream(InputStream in) 
-  throws IOException {
+    throws IOException {
    // Ensure native-lzo library is loaded & initialized
    if (!isNativeLzoLoaded()) {
      throw new IOException("native-lzo library not available");
@@ -135,16 +135,16 @@ public class LzoCodec implements Configurable, CompressionCodec {
    // Create the lzo input-stream
    LzoDecompressor.CompressionStrategy strategy = 
      LzoDecompressor.CompressionStrategy.valueOf(
-              conf.get("io.compression.codec.lzo.decompressor",
-                        LzoDecompressor.CompressionStrategy.LZO1X.name()
-                      )
-                    ); 
+                                                  conf.get("io.compression.codec.lzo.decompressor",
+                                                           LzoDecompressor.CompressionStrategy.LZO1X.name()
+                                                           )
+                                                  ); 
    int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 
-                                  64*1024);
+                                 64*1024);

    return new BlockDecompressorStream(in, 
-            new LzoDecompressor(strategy, bufferSize), 
-            bufferSize);
+                                       new LzoDecompressor(strategy, bufferSize), 
+                                       bufferSize);
  }

  /**

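Both factory methods above pull their strategy and buffer size from the job Configuration. A hedged sketch of just those lookups, using the keys, defaults, and overhead formula visible in the diff:

import org.apache.hadoop.conf.Configuration;

public class LzoConfigExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    String strategy = conf.get("io.compression.codec.lzo.compressor",
                               "LZO1X_1");               // default per the diff
    int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize",
                                 64 * 1024);
    // compressionOverhead for LZO1-family strategies, formula as in the diff:
    int overhead = strategy.contains("LZO1")
        ? (int) (((bufferSize - (64 + 3)) * 16.0) / 17.0) : 0;
    System.out.println("strategy=" + strategy
                       + " bufferSize=" + bufferSize
                       + " overhead=" + overhead);
  }
}
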
+ 3 - 3
src/java/org/apache/hadoop/io/compress/lzo/LzoCompressor.java

@@ -153,7 +153,7 @@ public class LzoCompressor implements Compressor {
      nativeLzoLoaded = true;
    } else {
      LOG.error("Cannot load " + LzoCompressor.class.getName() + 
-              " without native-hadoop library!");
+                " without native-hadoop library!");
    }
  }

@@ -220,7 +220,7 @@ public class LzoCompressor implements Compressor {
    // Reinitialize lzo's input direct buffer
    uncompressedDirectBuf.rewind();
    ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff,  
-                                          uncompressedDirectBufLen);
+                                            uncompressedDirectBufLen);

    // Note how much data is being fed to lzo
    userBufOff += uncompressedDirectBufLen;
@@ -261,7 +261,7 @@ public class LzoCompressor implements Compressor {
  }

  public synchronized int compress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
    if (b == null) {
      throw new NullPointerException();
    }

+ 4 - 4
src/java/org/apache/hadoop/io/compress/lzo/LzoDecompressor.java

@@ -133,7 +133,7 @@ public class LzoDecompressor implements Decompressor {
      nativeLzoLoaded = true;
    } else {
      LOG.error("Cannot load " + LzoDecompressor.class.getName() + 
-              " without native-hadoop library!");
+                " without native-hadoop library!");
    }
  }

@@ -202,7 +202,7 @@ public class LzoDecompressor implements Decompressor {
    // Reinitialize lzo's input direct-buffer
    compressedDirectBuf.rewind();
    ((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff, 
-                                        compressedDirectBufLen);
+                                          compressedDirectBufLen);

    // Note how much data is being fed to lzo
    userBufOff += compressedDirectBufLen;
@@ -243,7 +243,7 @@ public class LzoDecompressor implements Decompressor {
  }

  public synchronized int decompress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
    if (b == null) {
      throw new NullPointerException();
    }
@@ -255,7 +255,7 @@ public class LzoDecompressor implements Decompressor {

    // Check if there is uncompressed data
    n = uncompressedDirectBuf.remaining();
-    if(n > 0) {
+    if (n > 0) {
      n = Math.min(n, len);
      ((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
      return n;

+ 7 - 7
src/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java

@@ -188,7 +188,7 @@ public class ZlibCompressor implements Compressor {
   * @param directBufferSize Size of the direct buffer to be used.
   */
  public ZlibCompressor(CompressionLevel level, CompressionStrategy strategy, 
-      CompressionHeader header, int directBufferSize) {
+                        CompressionHeader header, int directBufferSize) {
    this.level = level;
    this.strategy = strategy;
    this.windowBits = header;
@@ -209,9 +209,9 @@ public class ZlibCompressor implements Compressor {
   */
  public ZlibCompressor() {
    this(CompressionLevel.DEFAULT_COMPRESSION, 
-        CompressionStrategy.DEFAULT_STRATEGY, 
-        CompressionHeader.DEFAULT_HEADER, 
-        DEFAULT_DIRECT_BUFFER_SIZE);
+         CompressionStrategy.DEFAULT_STRATEGY, 
+         CompressionHeader.DEFAULT_HEADER, 
+         DEFAULT_DIRECT_BUFFER_SIZE);
  }

  public synchronized void setInput(byte[] b, int off, int len) {
@@ -242,7 +242,7 @@ public class ZlibCompressor implements Compressor {
    // Reinitialize zlib's input direct buffer
    uncompressedDirectBuf.rewind();
    ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff,  
-                                          uncompressedDirectBufLen);
+                                            uncompressedDirectBufLen);

    // Note how much data is being fed to zlib
    userBufOff += uncompressedDirectBufLen;
@@ -289,7 +289,7 @@ public class ZlibCompressor implements Compressor {
  }

  public synchronized int compress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
    if (b == null) {
      throw new NullPointerException();
    }
@@ -369,7 +369,7 @@ public class ZlibCompressor implements Compressor {
  private native static void initIDs();
  private native static long init(int level, int strategy, int windowBits);
  private native static void setDictionary(long strm, byte[] b, int off,
-       int len);
+                                           int len);
  private native int deflateBytesDirect();
  private native static long getBytesRead(long strm);
  private native static long getBytesWritten(long strm);

+ 4 - 4
src/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java

@@ -147,7 +147,7 @@ public class ZlibDecompressor implements Decompressor {
    // Reinitialize zlib's input direct buffer
    compressedDirectBuf.rewind();
    ((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff, 
-                                        compressedDirectBufLen);
+                                          compressedDirectBufLen);

    // Note how much data is being fed to zlib
    userBufOff += compressedDirectBufLen;
@@ -195,7 +195,7 @@ public class ZlibDecompressor implements Decompressor {
  }

  public synchronized int decompress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
    if (b == null) {
      throw new NullPointerException();
    }
@@ -207,7 +207,7 @@ public class ZlibDecompressor implements Decompressor {

    // Check if there is uncompressed data
    n = uncompressedDirectBuf.remaining();
-    if(n > 0) {
+    if (n > 0) {
      n = Math.min(n, len);
      ((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
      return n;
@@ -278,7 +278,7 @@ public class ZlibDecompressor implements Decompressor {
  private native static void initIDs();
  private native static long init(int windowBits);
  private native static void setDictionary(long strm, byte[] b, int off,
-       int len);
+                                           int len);
  private native int inflateBytesDirect();
  private native static long getBytesRead(long strm);
  private native static long getBytesWritten(long strm);

+ 2 - 2
src/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -88,7 +88,7 @@ public class RetryPolicies {
   * </p>
   */
  public static final RetryPolicy retryByException(RetryPolicy defaultPolicy,
-        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
+                                                   Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
    return new ExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
  }

@@ -169,7 +169,7 @@ public class RetryPolicies {
    Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap;

    public ExceptionDependentRetry(RetryPolicy defaultPolicy,
-        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
+                                   Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
      this.defaultPolicy = defaultPolicy;
      this.exceptionToPolicyMap = exceptionToPolicyMap;
    }

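retryByException dispatches on the thrown exception's class via the supplied map, falling back to the default policy for everything else. A sketch of building such a policy; the RETRY_FOREVER and TRY_ONCE_THEN_FAIL constants are assumed from this era of RetryPolicies:

import java.net.ConnectException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;

public class RetryPolicyExample {
  public static RetryPolicy buildPolicy() {
    Map<Class<? extends Exception>, RetryPolicy> map =
      new HashMap<Class<? extends Exception>, RetryPolicy>();
    // Keep retrying while the server is still coming up...
    map.put(ConnectException.class, RetryPolicies.RETRY_FOREVER);
    // ...but fail fast on any other exception.
    return RetryPolicies.retryByException(RetryPolicies.TRY_ONCE_THEN_FAIL, map);
  }
}
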
+ 13 - 13
src/java/org/apache/hadoop/ipc/Client.java

@@ -65,7 +65,7 @@ public class Client {
    new Hashtable<InetSocketAddress, Connection>();

  private Class valueClass;                       // class of call values
-  private int timeout ;// timeout for calls
+  private int timeout;// timeout for calls
  private int counter;                            // counter for call ids
  private boolean running = true;                 // true while client runs
  private Configuration conf;
@@ -94,7 +94,7 @@ public class Client {
    /** Called by the connection thread when the call is complete and the
     * value or error string are available.  Notifies by default.  */
    public synchronized void callComplete() {
-        notify();                                 // notify caller
+      notify();                                 // notify caller
    }

    /** Update lastActivity with the current time. */
@@ -132,7 +132,7 @@ public class Client {

    public Connection(InetSocketAddress address) throws IOException {
      if (address.isUnresolved()) {
-         throw new UnknownHostException("unknown host: " + address.getHostName());
+        throw new UnknownHostException("unknown host: " + address.getHostName());
      }
      this.address = address;
      this.setName("IPC Client connection to " + address.toString());
@@ -183,7 +183,7 @@ public class Client {
               }
               return value;
             }
-          }));
+           }));
      this.out = new DataOutputStream
        (new BufferedOutputStream
         (new FilterOutputStream(socket.getOutputStream()) {
@@ -282,7 +282,7 @@ public class Client {
          decrementRef();
        }
      } catch (EOFException eof) {
-          // This is what happens when the remote side goes down
+        // This is what happens when the remote side goes down
      } catch (Exception e) {
        LOG.info(StringUtils.stringifyException(e));
      } finally {
@@ -408,11 +408,11 @@ public class Client {
          while (i.hasNext()) {
            Connection c = (Connection)i.next();
            if (c.isIdle()) { 
-            //We don't actually close the socket here (i.e., don't invoke
-            //the close() method). We leave that work to the response receiver
-            //thread. The reason for that is since we have taken a lock on the
-            //connections table object, we don't want to slow down the entire
-            //system if we happen to talk to a slow server.
+              //We don't actually close the socket here (i.e., don't invoke
+              //the close() method). We leave that work to the response receiver
+              //thread. The reason for that is since we have taken a lock on the
+              //connections table object, we don't want to slow down the entire
+              //system if we happen to talk to a slow server.
              i.remove();
              synchronized (c) {
                c.setCloseConnection();
@@ -429,8 +429,8 @@ public class Client {
   * class. */
  public Client(Class valueClass, Configuration conf) {
    this.valueClass = valueClass;
-    this.timeout = conf.getInt("ipc.client.timeout",10000);
-    this.maxIdleTime = conf.getInt("ipc.client.connection.maxidletime",1000);
+    this.timeout = conf.getInt("ipc.client.timeout", 10000);
+    this.maxIdleTime = conf.getInt("ipc.client.connection.maxidletime", 1000);
    this.maxRetries = conf.getInt("ipc.client.connect.max.retries", 10);
    this.conf = conf;

@@ -438,7 +438,7 @@ public class Client {
    t.setDaemon(true);
    t.setName(valueClass.getName() + " Connection Culler");
    LOG.debug(valueClass.getName() + 
-             "Connection culler maxidletime= " + maxIdleTime + "ms");
+              "Connection culler maxidletime= " + maxIdleTime + "ms");
    t.start();
  }


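The reindented culler comment above is the key design point: idle connections are only removed from the table and flagged while the lock is held, and the actual socket close is left to each connection's own thread so one slow peer cannot stall every caller. Schematically (all names illustrative):

import java.util.Hashtable;
import java.util.Iterator;

class CullerSketch {
  interface Connection {
    boolean isIdle();
    void setCloseConnection();
  }

  static void cullIdle(Hashtable<Object, Connection> connections) {
    synchronized (connections) {            // lock the table, not the sockets
      Iterator<Connection> i = connections.values().iterator();
      while (i.hasNext()) {
        Connection c = i.next();
        if (c.isIdle()) {
          i.remove();                       // drop it from the table...
          synchronized (c) {
            c.setCloseConnection();         // ...and let its own thread close it
            c.notify();
          }
        }
      }
    }
  }
}
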
+ 11 - 11
src/java/org/apache/hadoop/ipc/RPC.java

@@ -144,7 +144,7 @@ public class RPC {
   * Stop all RPC client connections
   */
  public static synchronized void stopClient(){
-    if(CLIENT != null)
+    if (CLIENT != null)
      CLIENT.stop();
  }

@@ -224,9 +224,9 @@ public class RPC {
    while (true) {
      try {
        return getProxy(protocol, clientVersion, addr, conf);
-      } catch( ConnectException se ) {  // namenode has not been started
+      } catch(ConnectException se) {  // namenode has not been started
        LOG.info("Server at " + addr + " not available yet, Zzzzz...");
-      } catch( SocketTimeoutException te ) {  // namenode is busy
+      } catch(SocketTimeoutException te) {  // namenode is busy
        LOG.info("Problem connecting to server: " + addr);
      }
      try {
@@ -241,9 +241,9 @@ public class RPC {
  public static VersionedProtocol getProxy(Class protocol, long clientVersion,
                                           InetSocketAddress addr, Configuration conf) throws IOException {
    VersionedProtocol proxy = (VersionedProtocol) Proxy.newProxyInstance(
-                                  protocol.getClassLoader(),
-                                  new Class[] { protocol },
-                                  new Invoker(addr, conf));
+                                                                         protocol.getClassLoader(),
+                                                                         new Class[] { protocol },
+                                                                         new Invoker(addr, conf));
    long serverVersion = proxy.getProtocolVersion(protocol.getName(), 
                                                  clientVersion);
    if (serverVersion == clientVersion) {
@@ -269,7 +269,7 @@ public class RPC {
    }

    Object[] values =
-      (Object[])Array.newInstance(method.getReturnType(),wrappedValues.length);
+      (Object[])Array.newInstance(method.getReturnType(), wrappedValues.length);
    for (int i = 0; i < values.length; i++)
      if (wrappedValues[i] != null)
        values[i] = ((ObjectWritable)wrappedValues[i]).get();
@@ -280,7 +280,7 @@ public class RPC {
  /** Construct a server for a protocol implementation instance listening on a
   * port and address. */
  public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) 
-  throws IOException {
+    throws IOException {
    return getServer(instance, bindAddress, port, 1, false, conf);
  }

@@ -289,8 +289,8 @@ public class RPC {
  public static Server getServer(final Object instance, final String bindAddress, final int port,
                                 final int numHandlers,
                                 final boolean verbose, Configuration conf) 
-  throws IOException {
-    return new Server(instance, conf, bindAddress,port, numHandlers, verbose);
+    throws IOException {
+    return new Server(instance, conf, bindAddress, port, numHandlers, verbose);
  }

  /** An RPC Server. */
@@ -306,7 +306,7 @@ public class RPC {
     * @param port the port to listen for connections on
     */
    public Server(Object instance, Configuration conf, String bindAddress, int port) 
-    throws IOException {
+      throws IOException {
      this(instance, conf,  bindAddress, port, 1, false);
    }


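The waitForProxy loop above retries while the server is not yet up (ConnectException) or busy (SocketTimeoutException), sleeping between attempts. The same shape with the RPC specifics stripped out:

import java.net.ConnectException;
import java.net.SocketTimeoutException;
import java.util.concurrent.Callable;

class WaitFor {
  static <T> T waitFor(Callable<T> attempt, long sleepMillis) throws Exception {
    while (true) {
      try {
        return attempt.call();              // succeeded: hand back the result
      } catch (ConnectException e) {
        System.out.println("server not available yet, Zzzzz...");
      } catch (SocketTimeoutException e) {
        System.out.println("problem connecting, will retry");
      }
      Thread.sleep(sleepMillis);            // back off before the next attempt
    }
  }
}
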
Some files were not shown because too many files changed in this diff