
HADOOP-1148. More indentation and spacing fixes.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@530556 13f79535-47bb-0310-9956-ffa450edef68
Author: Doug Cutting
Commit: 91c1614934
100 changed files with 3027 additions and 3025 deletions
  1. build.xml (+1 -1)
  2. src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java (+2 -2)
  3. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java (+13 -13)
  4. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java (+27 -27)
  5. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java (+17 -17)
  6. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java (+2 -2)
  7. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java (+46 -46)
  8. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java (+23 -23)
  9. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java (+102 -102)
  10. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java (+3 -3)
  11. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java (+34 -34)
  12. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java (+3 -3)
  13. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java (+59 -59)
  14. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java (+14 -14)
  15. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java (+9 -9)
  16. src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java (+7 -7)
  17. src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java (+6 -6)
  18. src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java (+10 -10)
  19. src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java (+27 -27)
  20. src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java (+1 -1)
  21. src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java (+1 -1)
  22. src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java (+3 -3)
  23. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java (+2 -2)
  24. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java (+1 -1)
  25. src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java (+2 -2)
  26. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java (+39 -39)
  27. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java (+2 -2)
  28. src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java (+1 -1)
  29. src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java (+6 -6)
  30. src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java (+4 -4)
  31. src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java (+3 -3)
  32. src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java (+1 -1)
  33. src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java (+2 -2)
  34. src/java/org/apache/hadoop/conf/Configuration.java (+13 -13)
  35. src/java/org/apache/hadoop/dfs/Block.java (+3 -3)
  36. src/java/org/apache/hadoop/dfs/BlockCommand.java (+70 -70)
  37. src/java/org/apache/hadoop/dfs/ClientProtocol.java (+14 -14)
  38. src/java/org/apache/hadoop/dfs/DFSClient.java (+37 -37)
  39. src/java/org/apache/hadoop/dfs/DFSFileInfo.java (+2 -2)
  40. src/java/org/apache/hadoop/dfs/DFSck.java (+3 -3)
  41. src/java/org/apache/hadoop/dfs/DataNode.java (+99 -99)
  42. src/java/org/apache/hadoop/dfs/DataStorage.java (+156 -156)
  43. src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java (+23 -23)
  44. src/java/org/apache/hadoop/dfs/DatanodeID.java (+7 -7)
  45. src/java/org/apache/hadoop/dfs/DatanodeInfo.java (+16 -16)
  46. src/java/org/apache/hadoop/dfs/DatanodeProtocol.java (+9 -9)
  47. src/java/org/apache/hadoop/dfs/DatanodeRegistration.java (+9 -9)
  48. src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java (+2 -2)
  49. src/java/org/apache/hadoop/dfs/DistributedFileSystem.java (+16 -16)
  50. src/java/org/apache/hadoop/dfs/FSDataset.java (+45 -45)
  51. src/java/org/apache/hadoop/dfs/FSDirectory.java (+66 -66)
  52. src/java/org/apache/hadoop/dfs/FSEditLog.java (+77 -77)
  53. src/java/org/apache/hadoop/dfs/FSImage.java (+235 -235)
  54. src/java/org/apache/hadoop/dfs/FSNamesystem.java (+232 -230)
  55. src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java (+7 -7)
  56. src/java/org/apache/hadoop/dfs/IncorrectVersionException.java (+8 -8)
  57. src/java/org/apache/hadoop/dfs/JspHelper.java (+18 -18)
  58. src/java/org/apache/hadoop/dfs/NameNode.java (+86 -86)
  59. src/java/org/apache/hadoop/dfs/NamenodeFsck.java (+24 -24)
  60. src/java/org/apache/hadoop/dfs/NamespaceInfo.java (+7 -7)
  61. src/java/org/apache/hadoop/dfs/SafeModeException.java (+2 -2)
  62. src/java/org/apache/hadoop/dfs/SecondaryNameNode.java (+3 -3)
  63. src/java/org/apache/hadoop/dfs/Storage.java (+159 -159)
  64. src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java (+4 -4)
  65. src/java/org/apache/hadoop/filecache/DistributedCache.java (+4 -4)
  66. src/java/org/apache/hadoop/fs/ChecksumFileSystem.java (+22 -22)
  67. src/java/org/apache/hadoop/fs/DF.java (+7 -7)
  68. src/java/org/apache/hadoop/fs/FSDataInputStream.java (+9 -9)
  69. src/java/org/apache/hadoop/fs/FSDataOutputStream.java (+2 -2)
  70. src/java/org/apache/hadoop/fs/FileSystem.java (+716 -716)
  71. src/java/org/apache/hadoop/fs/FileUtil.java (+21 -21)
  72. src/java/org/apache/hadoop/fs/FilterFileSystem.java (+11 -11)
  73. src/java/org/apache/hadoop/fs/FsShell.java (+54 -54)
  74. src/java/org/apache/hadoop/fs/InMemoryFileSystem.java (+2 -2)
  75. src/java/org/apache/hadoop/fs/LocalFileSystem.java (+4 -4)
  76. src/java/org/apache/hadoop/fs/Path.java (+2 -2)
  77. src/java/org/apache/hadoop/fs/RawLocalFileSystem.java (+11 -11)
  78. src/java/org/apache/hadoop/fs/s3/S3FileSystem.java (+21 -21)
  79. src/java/org/apache/hadoop/fs/s3/S3InputStream.java (+46 -46)
  80. src/java/org/apache/hadoop/fs/s3/S3OutputStream.java (+49 -49)
  81. src/java/org/apache/hadoop/io/BytesWritable.java (+1 -1)
  82. src/java/org/apache/hadoop/io/GenericWritable.java (+1 -1)
  83. src/java/org/apache/hadoop/io/MapFile.java (+17 -17)
  84. src/java/org/apache/hadoop/io/ObjectWritable.java (+5 -5)
  85. src/java/org/apache/hadoop/io/SequenceFile.java (+4 -4)
  86. src/java/org/apache/hadoop/io/Text.java (+2 -2)
  87. src/java/org/apache/hadoop/io/VersionedWritable.java (+1 -1)
  88. src/java/org/apache/hadoop/io/WritableComparator.java (+16 -16)
  89. src/java/org/apache/hadoop/io/WritableUtils.java (+6 -6)
  90. src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java (+2 -2)
  91. src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java (+2 -2)
  92. src/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java (+1 -1)
  93. src/java/org/apache/hadoop/io/compress/LzoCodec.java (+19 -19)
  94. src/java/org/apache/hadoop/io/compress/lzo/LzoCompressor.java (+3 -3)
  95. src/java/org/apache/hadoop/io/compress/lzo/LzoDecompressor.java (+4 -4)
  96. src/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java (+7 -7)
  97. src/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java (+4 -4)
  98. src/java/org/apache/hadoop/io/retry/RetryPolicies.java (+2 -2)
  99. src/java/org/apache/hadoop/ipc/Client.java (+13 -13)
  100. src/java/org/apache/hadoop/ipc/RPC.java (+11 -11)
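
The hunks below are mechanical: no logic changes, only indentation and spacing. As a condensed summary of the conventions they apply, here is a hedged sketch; the class and identifiers in it are invented for illustration and appear nowhere in the changed files.

    import java.io.IOException;
    import java.util.Arrays;
    import java.util.Iterator;

    // Hypothetical example condensing the whitespace rules applied in this commit.
    public class IndentationExample {

      // Wrapped "throws" clauses drop to a four-space continuation indent
      // (previously six spaces).
      public static void run(String args[])
        throws IOException {

        boolean closed = false;

        // "if(" gains a space after the keyword, and "! x" becomes "!x".
        if (!closed) {
          // The empty final clause of a for header loses its space before ")":
          // "it.hasNext(); )" becomes "it.hasNext();)".
          for(Iterator<String> it = Arrays.asList(args).iterator();
              it.hasNext();) {
            System.out.println(it.next());
          }
        }
      }
    }

Wrapped argument lists and multi-line string concatenations are also re-aligned under the opening parenthesis or operator of the first line, as the HRegion.java hunks show.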

+ 1 - 1
build.xml

@@ -482,7 +482,7 @@
   	
   	<checkstyle config="${test.src.dir}/checkstyle.xml"
   		failOnViolation="false">
-      <fileset dir="${src.dir}" includes="**/*.java"/>
+      <fileset dir="${src.dir}" includes="**/*.java" excludes="**/generated/**"/>
       <formatter type="xml" toFile="${test.build.dir}/checkstyle-errors.xml"/>
   	</checkstyle>
   	

+ 2 - 2
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJob.java

@@ -80,7 +80,7 @@ import org.apache.hadoop.mapred.RunningJob;
 public class ValueAggregatorJob {
 
   public static JobControl createValueAggregatorJobs(String args[])
-      throws IOException {
+    throws IOException {
     JobControl theControl = new JobControl("ValueAggregatorJobs");
     ArrayList dependingJobs = new ArrayList();
     JobConf aJobConf = createValueAggregatorJob(args);
@@ -98,7 +98,7 @@ public class ValueAggregatorJob {
    * @throws IOException
    */
   public static JobConf createValueAggregatorJob(String args[])
-      throws IOException {
+    throws IOException {
 
     if (args.length < 2) {
       System.out.println("usage: inputDirs outDir [numOfReducer [textinputformat|seq [specfile [jobName]]]]");

+ 13 - 13
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HAbstractScanner.java

@@ -60,17 +60,17 @@ public abstract class HAbstractScanner implements HScannerInterface {
       String column = col.toString();
       try {
         int colpos = column.indexOf(":") + 1;
-        if(colpos == 0) {
+        if (colpos == 0) {
           throw new IllegalArgumentException("Column name has no family indicator.");
         }
 
         String columnkey = column.substring(colpos);
 
-        if(columnkey == null || columnkey.length() == 0) {
+        if (columnkey == null || columnkey.length() == 0) {
           this.matchType = MATCH_TYPE.FAMILY_ONLY;
           this.family = column.substring(0, colpos);
 
-        } else if(isRegexPattern.matcher(columnkey).matches()) {
+        } else if (isRegexPattern.matcher(columnkey).matches()) {
           this.matchType = MATCH_TYPE.REGEX;
           this.columnMatcher = Pattern.compile(column);
 
@@ -86,13 +86,13 @@ public abstract class HAbstractScanner implements HScannerInterface {
     // Matching method
     
     boolean matches(Text col) throws IOException {
-      if(this.matchType == MATCH_TYPE.SIMPLE) {
+      if (this.matchType == MATCH_TYPE.SIMPLE) {
         return col.equals(this.col);
         
-      } else if(this.matchType == MATCH_TYPE.FAMILY_ONLY) {
+      } else if (this.matchType == MATCH_TYPE.FAMILY_ONLY) {
         return col.toString().startsWith(this.family);
         
-      } else if(this.matchType == MATCH_TYPE.REGEX) {
+      } else if (this.matchType == MATCH_TYPE.REGEX) {
         return this.columnMatcher.matcher(col.toString()).matches();
         
       } else {
@@ -121,7 +121,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
     for(int i = 0; i < targetCols.length; i++) {
       Text family = HStoreKey.extractFamily(targetCols[i]);
       Vector<ColumnMatcher> matchers = okCols.get(family);
-      if(matchers == null) {
+      if (matchers == null) {
         matchers = new Vector<ColumnMatcher>();
       }
       matchers.add(new ColumnMatcher(targetCols[i]));
@@ -144,11 +144,11 @@ public abstract class HAbstractScanner implements HScannerInterface {
     Text column = keys[i].getColumn();
     Text family = HStoreKey.extractFamily(column);
     Vector<ColumnMatcher> matchers = okCols.get(family);
-    if(matchers == null) {
+    if (matchers == null) {
       return false;
     }
     for(int m = 0; m < matchers.size(); m++) {
-      if(matchers.get(m).matches(column)) {
+      if (matchers.get(m).matches(column)) {
         return true;
       }
     }
@@ -203,7 +203,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
     // Grab all the values that match this row/timestamp
 
     boolean insertedItem = false;
-    if(chosenRow != null) {
+    if (chosenRow != null) {
       key.setRow(chosenRow);
       key.setVersion(chosenTimestamp);
       key.setColumn(new Text(""));
@@ -215,7 +215,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
               && (keys[i].getRow().compareTo(chosenRow) == 0)
               && (keys[i].getTimestamp() == chosenTimestamp)) {
 
-          if(columnMatch(i)) {
+          if (columnMatch(i)) {
             outbuf.reset();
             vals[i].write(outbuf);
             byte byteresults[] = outbuf.getData();
@@ -226,7 +226,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
             insertedItem = true;
           }
 
-          if (! getNext(i)) {
+          if (!getNext(i)) {
             closeSubScanner(i);
           }
         }
@@ -237,7 +237,7 @@ public abstract class HAbstractScanner implements HScannerInterface {
         while((keys[i] != null)
               && ((keys[i].getRow().compareTo(chosenRow) <= 0)
                   || (keys[i].getTimestamp() > this.timestamp)
-                  || (! columnMatch(i)))) {
+                  || (!columnMatch(i)))) {
 
           getNext(i);
         }

+ 27 - 27
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HClient.java

@@ -95,12 +95,12 @@ public class HClient extends HGlobals implements HConstants {
   }
 
   public synchronized void openTable(Text tableName) throws IOException {
-    if(closed) {
+    if (closed) {
       throw new IllegalStateException("client is not open");
     }
 
     tableServers = tablesToServers.get(tableName);
-    if(tableServers == null ) {                 // We don't know where the table is
+    if (tableServers == null) {                 // We don't know where the table is
       findTableInMeta(tableName);               // Load the information from meta
     }
   }
@@ -108,9 +108,9 @@ public class HClient extends HGlobals implements HConstants {
   private void findTableInMeta(Text tableName) throws IOException {
     TreeMap<Text, TableInfo> metaServers = tablesToServers.get(META_TABLE_NAME);
     
-    if(metaServers == null) {                   // Don't know where the meta is
+    if (metaServers == null) {                   // Don't know where the meta is
       loadMetaFromRoot(tableName);
-      if(tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
+      if (tableName.equals(META_TABLE_NAME) || tableName.equals(ROOT_TABLE_NAME)) {
         // All we really wanted was the meta or root table
         return;
       }
@@ -119,7 +119,7 @@ public class HClient extends HGlobals implements HConstants {
 
     tableServers = new TreeMap<Text, TableInfo>();
     for(Iterator<TableInfo> i = metaServers.tailMap(tableName).values().iterator();
-        i.hasNext(); ) {
+        i.hasNext();) {
       
       TableInfo t = i.next();
       
@@ -133,7 +133,7 @@ public class HClient extends HGlobals implements HConstants {
    */
   private void loadMetaFromRoot(Text tableName) throws IOException {
     locateRootRegion();
-    if(tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
+    if (tableName.equals(ROOT_TABLE_NAME)) {   // All we really wanted was the root
       return;
     }
     scanRoot();
@@ -144,7 +144,7 @@ public class HClient extends HGlobals implements HConstants {
    * could be.
    */
   private void locateRootRegion() throws IOException {
-    if(master == null) {
+    if (master == null) {
       master = (HMasterInterface)RPC.getProxy(HMasterInterface.class, 
                                               HMasterInterface.versionID,
                                               masterLocation.getInetSocketAddress(), conf);
@@ -157,7 +157,7 @@ public class HClient extends HGlobals implements HConstants {
       while(rootRegionLocation == null && localTimeouts < numTimeouts) {
         rootRegionLocation = master.findRootRegion();
 
-        if(rootRegionLocation == null) {
+        if (rootRegionLocation == null) {
           try {
             Thread.sleep(clientTimeout);
 
@@ -166,7 +166,7 @@ public class HClient extends HGlobals implements HConstants {
           localTimeouts++;
         }
       }
-      if(rootRegionLocation == null) {
+      if (rootRegionLocation == null) {
         throw new IOException("Timed out trying to locate root region");
       }
       
@@ -174,7 +174,7 @@ public class HClient extends HGlobals implements HConstants {
       
       HRegionInterface rootRegion = getHRegionConnection(rootRegionLocation);
 
-      if(rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
+      if (rootRegion.getRegionInfo(rootRegionInfo.regionName) != null) {
         tableServers = new TreeMap<Text, TableInfo>();
         tableServers.put(startRow, new TableInfo(rootRegionInfo, rootRegionLocation));
         tablesToServers.put(ROOT_TABLE_NAME, tableServers);
@@ -184,7 +184,7 @@ public class HClient extends HGlobals implements HConstants {
       
     } while(rootRegionLocation == null && tries++ < numRetries);
     
-    if(rootRegionLocation == null) {
+    if (rootRegionLocation == null) {
       closed = true;
       throw new IOException("unable to locate root region server");
     }
@@ -220,7 +220,7 @@ public class HClient extends HGlobals implements HConstants {
         HRegionInfo regionInfo = new HRegionInfo();
         regionInfo.readFields(inbuf);
         
-        if(! regionInfo.tableDesc.getName().equals(tableName)) {
+        if (!regionInfo.tableDesc.getName().equals(tableName)) {
           // We're done
           break;
         }
@@ -245,7 +245,7 @@ public class HClient extends HGlobals implements HConstants {
 
     HRegionInterface server = servers.get(regionServer.toString());
     
-    if(server == null) {                                // Get a connection
+    if (server == null) {                                // Get a connection
       
       server = (HRegionInterface)RPC.waitForProxy(HRegionInterface.class, 
                                                   HRegionInterface.versionID, regionServer.getInetSocketAddress(), conf);
@@ -257,7 +257,7 @@ public class HClient extends HGlobals implements HConstants {
 
   /** Close the connection to the HRegionServer */
   public synchronized void close() throws IOException {
-    if(! closed) {
+    if (!closed) {
       RPC.stopClient();
       closed = true;
     }
@@ -274,13 +274,13 @@ public class HClient extends HGlobals implements HConstants {
     TreeSet<HTableDescriptor> uniqueTables = new TreeSet<HTableDescriptor>();
     
     TreeMap<Text, TableInfo> metaTables = tablesToServers.get(META_TABLE_NAME);
-    if(metaTables == null) {
+    if (metaTables == null) {
       // Meta is not loaded yet so go do that
       loadMetaFromRoot(META_TABLE_NAME);
       metaTables = tablesToServers.get(META_TABLE_NAME);
     }
 
-    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext(); ) {
+    for(Iterator<TableInfo>i = metaTables.values().iterator(); i.hasNext();) {
       TableInfo t = i.next();
       HRegionInterface server = getHRegionConnection(t.serverAddress);
       HScannerInterface scanner = null;
@@ -297,7 +297,7 @@ public class HClient extends HGlobals implements HConstants {
 
           // Only examine the rows where the startKey is zero length
           
-          if(info.startKey.getLength() == 0) {
+          if (info.startKey.getLength() == 0) {
             uniqueTables.add(info.tableDesc);
           }
           results.clear();
@@ -311,7 +311,7 @@ public class HClient extends HGlobals implements HConstants {
   }
 
   private TableInfo getTableInfo(Text row) {
-    if(tableServers == null) {
+    if (tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     
@@ -335,7 +335,7 @@ public class HClient extends HGlobals implements HConstants {
                                                                           info.regionInfo.regionName, row, column, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
+    for(int i = 0; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -351,7 +351,7 @@ public class HClient extends HGlobals implements HConstants {
                                                                           info.regionInfo.regionName, row, column, timestamp, numVersions);
     
     ArrayList<byte[]> bytes = new ArrayList<byte[]>();
-    for(int i = 0 ; i < values.length; i++) {
+    for(int i = 0; i < values.length; i++) {
       bytes.add(values[i].get());
     }
     return bytes.toArray(new byte[values.length][]);
@@ -369,7 +369,7 @@ public class HClient extends HGlobals implements HConstants {
    * Return the specified columns.
    */
   public HScannerInterface obtainScanner(Text[] columns, Text startRow) throws IOException {
-    if(tableServers == null) {
+    if (tableServers == null) {
       throw new IllegalStateException("Must open table first");
     }
     return new ClientScanner(columns, startRow);
@@ -481,11 +481,11 @@ public class HClient extends HGlobals implements HConstants {
      * Returns false if there are no more scanners.
      */
     private boolean nextScanner() throws IOException {
-      if(scanner != null) {
+      if (scanner != null) {
         scanner.close();
       }
       currentRegion += 1;
-      if(currentRegion == regions.length) {
+      if (currentRegion == regions.length) {
         close();
         return false;
       }
@@ -505,13 +505,13 @@ public class HClient extends HGlobals implements HConstants {
      * @see org.apache.hadoop.hbase.HScannerInterface#next(org.apache.hadoop.hbase.HStoreKey, java.util.TreeMap)
      */
     public boolean next(HStoreKey key, TreeMap<Text, byte[]> results) throws IOException {
-      if(closed) {
+      if (closed) {
         return false;
       }
       boolean status = scanner.next(key, results);
-      if(! status) {
+      if (!status) {
         status = nextScanner();
-        if(status) {
+        if (status) {
           status = scanner.next(key, results);
         }
       }
@@ -522,7 +522,7 @@ public class HClient extends HGlobals implements HConstants {
      * @see org.apache.hadoop.hbase.HScannerInterface#close()
      */
     public void close() throws IOException {
-      if(scanner != null) {
+      if (scanner != null) {
         scanner.close();
       }
       server = null;

+ 17 - 17
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLog.java

@@ -101,12 +101,12 @@ public class HLog {
       newlog.close();
     }
     
-    if(fs.exists(srcDir)) {
+    if (fs.exists(srcDir)) {
       
-      if(! fs.delete(srcDir)) {
+      if (!fs.delete(srcDir)) {
         LOG.error("Cannot delete: " + srcDir);
         
-        if(! FileUtil.fullyDelete(new File(srcDir.toString()))) {
+        if (!FileUtil.fullyDelete(new File(srcDir.toString()))) {
           throw new IOException("Cannot delete: " + srcDir);
         }
       }
@@ -127,7 +127,7 @@ public class HLog {
     this.conf = conf;
     this.logSeqNum = 0;
 
-    if(fs.exists(dir)) {
+    if (fs.exists(dir)) {
       throw new IOException("Target HLog directory already exists: " + dir);
     }
     fs.mkdirs(dir);
@@ -154,7 +154,7 @@ public class HLog {
 
       Vector<Path> toDeleteList = new Vector<Path>();
       synchronized(this) {
-        if(closed) {
+        if (closed) {
           throw new IOException("Cannot roll log; log is closed");
         }
 
@@ -174,10 +174,10 @@ public class HLog {
 
         // Close the current writer (if any), and grab a new one.
         
-        if(writer != null) {
+        if (writer != null) {
           writer.close();
           
-          if(filenum > 0) {
+          if (filenum > 0) {
             outputfiles.put(logSeqNum-1, computeFilename(filenum-1));
           }
         }
@@ -192,10 +192,10 @@ public class HLog {
         // over all the regions.
 
         long oldestOutstandingSeqNum = Long.MAX_VALUE;
-        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext(); ) {
+        for(Iterator<Long> it = regionToLastFlush.values().iterator(); it.hasNext();) {
           long curSeqNum = it.next().longValue();
           
-          if(curSeqNum < oldestOutstandingSeqNum) {
+          if (curSeqNum < oldestOutstandingSeqNum) {
             oldestOutstandingSeqNum = curSeqNum;
           }
         }
@@ -205,10 +205,10 @@ public class HLog {
 
         LOG.debug("removing old log files");
         
-        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Long> it = outputfiles.keySet().iterator(); it.hasNext();) {
           long maxSeqNum = it.next().longValue();
           
-          if(maxSeqNum < oldestOutstandingSeqNum) {
+          if (maxSeqNum < oldestOutstandingSeqNum) {
             Path p = outputfiles.get(maxSeqNum);
             it.remove();
             toDeleteList.add(p);
@@ -221,7 +221,7 @@ public class HLog {
 
       // Actually delete them, if any!
 
-      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext(); ) {
+      for(Iterator<Path> it = toDeleteList.iterator(); it.hasNext();) {
         Path p = it.next();
         fs.delete(p);
       }
@@ -262,7 +262,7 @@ public class HLog {
    * We need to seize a lock on the writer so that writes are atomic.
    */
   public synchronized void append(Text regionName, Text tableName, Text row, TreeMap<Text, byte[]> columns, long timestamp) throws IOException {
-    if(closed) {
+    if (closed) {
       throw new IOException("Cannot append; log is closed");
     }
     
@@ -273,12 +273,12 @@ public class HLog {
     // that don't have any flush yet, the relevant operation is the
     // first one that's been added.
     
-    if(regionToLastFlush.get(regionName) == null) {
+    if (regionToLastFlush.get(regionName) == null) {
       regionToLastFlush.put(regionName, seqNum[0]);
     }
 
     int counter = 0;
-    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
       Text column = it.next();
       byte[] val = columns.get(column);
       HLogKey logKey = new HLogKey(regionName, tableName, row, seqNum[counter++]);
@@ -333,11 +333,11 @@ public class HLog {
 
   /** Complete the cache flush */
   public synchronized void completeCacheFlush(Text regionName, Text tableName, long logSeqId) throws IOException {
-    if(closed) {
+    if (closed) {
       return;
     }
     
-    if(! insideCacheFlush) {
+    if (!insideCacheFlush) {
       throw new IOException("Impossible situation: inside completeCacheFlush(), but 'insideCacheFlush' flag is false");
     }
     

+ 2 - 2
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HLogKey.java

@@ -80,10 +80,10 @@ public class HLogKey implements WritableComparable {
     HLogKey other = (HLogKey) o;
     int result = this.regionName.compareTo(other.regionName);
     
-    if(result == 0) {
+    if (result == 0) {
       result = this.row.compareTo(other.row);
       
-      if(result == 0) {
+      if (result == 0) {
         
         if (this.logSeqNum < other.logSeqNum) {
           result = -1;

+ 46 - 46
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMaster.java

@@ -108,7 +108,7 @@ public class HMaster extends HGlobals
       };
       Text firstRow = new Text();
   
-      while((! closed)) {
+      while((!closed)) {
         int metaRegions = 0;
         while(rootRegionLocation == null) {
           try {
@@ -155,8 +155,8 @@ public class HMaster extends HGlobals
             HServerInfo storedInfo = null;
             synchronized(serversToServerInfo) {
               storedInfo = serversToServerInfo.get(serverName);
-              if(storedInfo == null
-                 || storedInfo.getStartCode() != startCode) {
+              if (storedInfo == null
+                  || storedInfo.getStartCode() != startCode) {
               
                 // The current assignment is no good; load the region.
   
@@ -261,8 +261,8 @@ public class HMaster extends HGlobals
           HServerInfo storedInfo = null;
           synchronized(serversToServerInfo) {
             storedInfo = serversToServerInfo.get(serverName);
-            if(storedInfo == null
-               || storedInfo.getStartCode() != startCode) {
+            if (storedInfo == null
+                || storedInfo.getStartCode() != startCode) {
             
               // The current assignment is no good; load the region.
 
@@ -285,16 +285,16 @@ public class HMaster extends HGlobals
     }
 
     public void run() {
-      while((! closed)) {
+      while((!closed)) {
         MetaRegion region = null;
         
         while(region == null) {
           synchronized(metaRegionsToScan) {
-            if(metaRegionsToScan.size() != 0) {
+            if (metaRegionsToScan.size() != 0) {
               region = metaRegionsToScan.remove(0);
             }
           }
-          if(region == null) {
+          if (region == null) {
             try {
               metaRegionsToScan.wait();
               
@@ -307,7 +307,7 @@ public class HMaster extends HGlobals
         
         synchronized(knownMetaRegions) {
           knownMetaRegions.put(region.startKey, region);
-          if(rootScanned && knownMetaRegions.size() == numMetaRegions) {
+          if (rootScanned && knownMetaRegions.size() == numMetaRegions) {
             allMetaRegionsScanned = true;
             allMetaRegionsScanned.notifyAll();
           }
@@ -319,7 +319,7 @@ public class HMaster extends HGlobals
           
           } catch(InterruptedException ex) {
           }
-          if(! allMetaRegionsScanned) {
+          if (!allMetaRegionsScanned) {
             break;                              // A region must have split
           }
           
@@ -328,7 +328,7 @@ public class HMaster extends HGlobals
           Vector<MetaRegion> v = new Vector<MetaRegion>();
           v.addAll(knownMetaRegions.values());
           
-          for(Iterator<MetaRegion> i = v.iterator(); i.hasNext(); ) {
+          for(Iterator<MetaRegion> i = v.iterator(); i.hasNext();) {
             scanRegion(i.next());
           }
         } while(true);
@@ -391,12 +391,12 @@ public class HMaster extends HGlobals
 
     // Make sure the root directory exists!
     
-    if(! fs.exists(dir)) {
+    if (!fs.exists(dir)) {
       fs.mkdirs(dir);
     }
 
     Path rootRegionDir = HStoreFile.getHRegionDir(dir, rootRegionInfo.regionName);
-    if(! fs.exists(rootRegionDir)) {
+    if (!fs.exists(rootRegionDir)) {
       
       // Bootstrap! Need to create the root region and the first meta region.
       //TODO is the root region self referential?
@@ -521,7 +521,7 @@ public class HMaster extends HGlobals
     synchronized(serversToServerInfo) {
       storedInfo = serversToServerInfo.get(server);
         
-      if(storedInfo != null) {
+      if (storedInfo != null) {
         serversToServerInfo.remove(server);
 
         synchronized(msgQueue) {
@@ -548,7 +548,7 @@ public class HMaster extends HGlobals
     synchronized(serversToServerInfo) {
       HServerInfo storedInfo = serversToServerInfo.get(server);
       
-      if(storedInfo == null) {
+      if (storedInfo == null) {
         
         // The HBaseMaster may have been restarted.
         // Tell the RegionServer to start over and call regionServerStartup()
@@ -557,7 +557,7 @@ public class HMaster extends HGlobals
         returnMsgs[0] = new HMsg(HMsg.MSG_CALL_SERVER_STARTUP);
         return returnMsgs;
         
-      } else if(storedInfo.getStartCode() != serverInfo.getStartCode()) {
+      } else if (storedInfo.getStartCode() != serverInfo.getStartCode()) {
         
         // This state is reachable if:
         //
@@ -597,9 +597,9 @@ public class HMaster extends HGlobals
     // Process the kill list
     
     TreeMap<Text, HRegionInfo> regionsToKill = killList.get(info.toString());
-    if(regionsToKill != null) {
+    if (regionsToKill != null) {
       for(Iterator<HRegionInfo> i = regionsToKill.values().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
         
         returnMsgs.add(new HMsg(HMsg.MSG_REGION_CLOSE_AND_DELETE, i.next()));
       }
@@ -616,7 +616,7 @@ public class HMaster extends HGlobals
         case HMsg.MSG_REPORT_OPEN:
           HRegionInfo regionInfo = unassignedRegions.get(region.regionName);
 
-          if(regionInfo == null) {
+          if (regionInfo == null) {
 
             // This Region should not have been opened.
             // Ask the server to shut it down, but don't report it as closed.  
@@ -632,7 +632,7 @@ public class HMaster extends HGlobals
             unassignedRegions.remove(region.regionName);
             assignAttempts.remove(region.regionName);
 
-            if(region.regionName.compareTo(rootRegionInfo.regionName) == 0) {
+            if (region.regionName.compareTo(rootRegionInfo.regionName) == 0) {
 
               // Store the Root Region location (in memory)
 
@@ -643,7 +643,7 @@ public class HMaster extends HGlobals
               rootRegionLocation.notifyAll();
               break;
               
-            } else if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
+            } else if (region.regionName.find(META_TABLE_NAME.toString()) == 0) {
 
               // It's a meta region. Put it on the queue to be scanned.
               
@@ -668,7 +668,7 @@ public class HMaster extends HGlobals
           break;
 
         case HMsg.MSG_REPORT_CLOSE:
-          if(region.regionName.compareTo(rootRegionInfo.regionName) == 0) { // Root region
+          if (region.regionName.compareTo(rootRegionInfo.regionName) == 0) { // Root region
             rootRegionLocation = null;
             unassignedRegions.put(region.regionName, region);
             assignAttempts.put(region.regionName, 0L);
@@ -676,10 +676,10 @@ public class HMaster extends HGlobals
           } else {
             boolean reassignRegion = true;
             
-            if(regionsToKill.containsKey(region.regionName)) {
+            if (regionsToKill.containsKey(region.regionName)) {
               regionsToKill.remove(region.regionName);
               
-              if(regionsToKill.size() > 0) {
+              if (regionsToKill.size() > 0) {
                 killList.put(info.toString(), regionsToKill);
                 
               } else {
@@ -701,7 +701,7 @@ public class HMaster extends HGlobals
           break;
 
         case HMsg.MSG_NEW_REGION:
-          if(region.regionName.find(META_TABLE_NAME.toString()) == 0) {
+          if (region.regionName.find(META_TABLE_NAME.toString()) == 0) {
             // A meta region has split.
             
             allMetaRegionsScanned = false;
@@ -720,7 +720,7 @@ public class HMaster extends HGlobals
 
       // Figure out what the RegionServer ought to do, and write back.
 
-      if(unassignedRegions.size() > 0) {
+      if (unassignedRegions.size() > 0) {
 
         // Open new regions as necessary
 
@@ -731,20 +731,20 @@ public class HMaster extends HGlobals
         long now = System.currentTimeMillis();
 
         for(Iterator<Text> it = unassignedRegions.keySet().iterator();
-            it.hasNext(); ) {
+            it.hasNext();) {
 
           Text curRegionName = it.next();
           HRegionInfo regionInfo = unassignedRegions.get(curRegionName);
           long assignedTime = assignAttempts.get(curRegionName);
 
-          if(now - assignedTime > maxRegionOpenTime) {
+          if (now - assignedTime > maxRegionOpenTime) {
             returnMsgs.add(new HMsg(HMsg.MSG_REGION_OPEN, regionInfo));
 
             assignAttempts.put(curRegionName, now);
             counter++;
           }
 
-          if(counter >= targetForServer) {
+          if (counter >= targetForServer) {
             break;
           }
         }
@@ -762,7 +762,7 @@ public class HMaster extends HGlobals
     }
     
     public void run() {
-      while(! closed) {
+      while(!closed) {
         PendingOperation op = null;
         
         synchronized(msgQueue) {
@@ -827,7 +827,7 @@ public class HMaster extends HGlobals
           byte serverBytes[] = results.get(META_COL_SERVER);
           String serverName = new String(serverBytes, UTF8_ENCODING);
 
-          if(deadServer.compareTo(serverName) != 0) {
+          if (deadServer.compareTo(serverName) != 0) {
             // This isn't the server you're looking for - move along
             continue;
           }
@@ -835,7 +835,7 @@ public class HMaster extends HGlobals
           byte startCodeBytes[] = results.get(META_COL_STARTCODE);
           long startCode = Long.decode(new String(startCodeBytes, UTF8_ENCODING));
 
-          if(oldStartCode != startCode) {
+          if (oldStartCode != startCode) {
             // Close but no cigar
             continue;
           }
@@ -869,7 +869,7 @@ public class HMaster extends HGlobals
       // Put all the regions we found on the unassigned region list
 
       for(Iterator<Map.Entry<Text, HRegionInfo>> i = regions.entrySet().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
 
         Map.Entry<Text, HRegionInfo> e = i.next();
         Text region = e.getKey();
@@ -903,7 +903,7 @@ public class HMaster extends HGlobals
       
       scanMetaRegion(server, scanner, rootRegionInfo.regionName);
       for(Iterator<MetaRegion> i = knownMetaRegions.values().iterator();
-          i.hasNext(); ) {
+          i.hasNext();) {
         
         MetaRegion r = i.next();
 
@@ -929,7 +929,7 @@ public class HMaster extends HGlobals
       // If the region closing down is a meta region then we need to update
       // the ROOT table
       
-      if(this.regionInfo.regionName.find(metaTableDesc.getName().toString()) == 0) {
+      if (this.regionInfo.regionName.find(metaTableDesc.getName().toString()) == 0) {
         this.rootRegion = true;
         
       } else {
@@ -954,7 +954,7 @@ public class HMaster extends HGlobals
 
       Text metaRegionName;
       HRegionInterface server;
-      if(rootRegion) {
+      if (rootRegion) {
         metaRegionName = rootRegionInfo.regionName;
         server = client.getHRegionConnection(rootRegionLocation);
         
@@ -969,7 +969,7 @@ public class HMaster extends HGlobals
       server.delete(metaRegionName, clientId, lockid, META_COL_STARTCODE);
       server.commit(metaRegionName, clientId, lockid);
       
-      if(reassignRegion) {
+      if (reassignRegion) {
         synchronized(unassignedRegions) {
           unassignedRegions.put(regionInfo.regionName, regionInfo);
           assignAttempts.put(regionInfo.regionName, 0L);
@@ -986,7 +986,7 @@ public class HMaster extends HGlobals
     BytesWritable startCode;
     
     public PendingOpenReport(HServerInfo info, Text regionName) {
-      if(regionName.find(metaTableDesc.getName().toString()) == 0) {
+      if (regionName.find(metaTableDesc.getName().toString()) == 0) {
         
         // The region which just came on-line is a META region.
         // We need to look in the ROOT region for its information.
@@ -1030,7 +1030,7 @@ public class HMaster extends HGlobals
 
       Text metaRegionName;
       HRegionInterface server;
-      if(rootRegion) {
+      if (rootRegion) {
         metaRegionName = rootRegionInfo.regionName;
         server = client.getHRegionConnection(rootRegionLocation);
         
@@ -1074,13 +1074,13 @@ public class HMaster extends HGlobals
 
 
     BytesWritable bytes = server.get(metaRegionName, desc.getName(), META_COL_REGIONINFO);
-    if(bytes != null && bytes.getSize() != 0) {
+    if (bytes != null && bytes.getSize() != 0) {
       byte[] infoBytes = bytes.get();
       DataInputBuffer inbuf = new DataInputBuffer();
       inbuf.reset(infoBytes, infoBytes.length);
       HRegionInfo info = new HRegionInfo();
       info.readFields(inbuf);
-      if(info.tableDesc.getName().compareTo(desc.getName()) == 0) {
+      if (info.tableDesc.getName().compareTo(desc.getName()) == 0) {
         throw new IOException("table already exists");
       }
     }
@@ -1183,7 +1183,7 @@ public class HMaster extends HGlobals
     }
 
     for(Iterator<MetaRegion> i = knownMetaRegions.tailMap(tableName).values().iterator();
-        i.hasNext(); ) {
+        i.hasNext();) {
 
       // Find all the regions that make up this table
       
@@ -1206,7 +1206,7 @@ public class HMaster extends HGlobals
           HRegionInfo info = new HRegionInfo();
           info.readFields(inbuf);
 
-          if(info.tableDesc.getName().compareTo(tableName) > 0) {
+          if (info.tableDesc.getName().compareTo(tableName) > 0) {
             break;                      // Beyond any more entries for this table
           }
 
@@ -1220,12 +1220,12 @@ public class HMaster extends HGlobals
 
           synchronized(serversToServerInfo) {
             HServerInfo s = serversToServerInfo.get(serverName);
-            if(s != null && s.getStartCode() == startCode) {
+            if (s != null && s.getStartCode() == startCode) {
               
               // It is being served. Tell the server to stop it and not report back
               
               TreeMap<Text, HRegionInfo> regionsToKill = killList.get(serverName);
-              if(regionsToKill == null) {
+              if (regionsToKill == null) {
                 regionsToKill = new TreeMap<Text, HRegionInfo>();
               }
               regionsToKill.put(info.regionName, info);
@@ -1233,7 +1233,7 @@ public class HMaster extends HGlobals
             }
           }
         }
-        for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext(); ) {
+        for(Iterator<Text> row = rowsToDelete.iterator(); row.hasNext();) {
           long lockid = server.startUpdate(m.regionName, clientId, row.next());
           server.delete(m.regionName, clientId, lockid, columns[0]);
           server.commit(m.regionName, clientId, lockid);

+ 23 - 23
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HMemcache.java

@@ -65,10 +65,10 @@ public class HMemcache {
 
     locking.obtainWriteLock();
     try {
-      if(snapshot != null) {
+      if (snapshot != null) {
         throw new IOException("Snapshot in progress!");
       }
-      if(memcache.size() == 0) {
+      if (memcache.size() == 0) {
         LOG.debug("memcache empty. Skipping snapshot");
         return retval;
       }
@@ -99,16 +99,16 @@ public class HMemcache {
     locking.obtainWriteLock();
 
     try {
-      if(snapshot == null) {
+      if (snapshot == null) {
         throw new IOException("Snapshot not present!");
       }
       LOG.debug("deleting snapshot");
       
       for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator(); 
-          it.hasNext(); ) {
+          it.hasNext();) {
         
         TreeMap<HStoreKey, BytesWritable> cur = it.next();
-        if(snapshot == cur) {
+        if (snapshot == cur) {
           it.remove();
           break;
         }
@@ -130,7 +130,7 @@ public class HMemcache {
   public void add(Text row, TreeMap<Text, byte[]> columns, long timestamp) {
     locking.obtainWriteLock();
     try {
-      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext(); ) {
+      for(Iterator<Text> it = columns.keySet().iterator(); it.hasNext();) {
         Text column = it.next();
         byte[] val = columns.get(column);
 
@@ -156,7 +156,7 @@ public class HMemcache {
       results.addAll(0, result);
 
       for(int i = history.size()-1; i >= 0; i--) {
-        if(numVersions > 0 && results.size() >= numVersions) {
+        if (numVersions > 0 && results.size() >= numVersions) {
           break;
         }
         
@@ -164,7 +164,7 @@ public class HMemcache {
         results.addAll(results.size(), result);
       }
       
-      if(results.size() == 0) {
+      if (results.size() == 0) {
         return null;
         
       } else {
@@ -203,16 +203,16 @@ public class HMemcache {
     
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(key);
     
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
       HStoreKey itKey = it.next();
       Text itCol = itKey.getColumn();
 
-      if(results.get(itCol) == null
-         && key.matchesWithoutColumn(itKey)) {
+      if (results.get(itCol) == null
+          && key.matchesWithoutColumn(itKey)) {
         BytesWritable val = tailMap.get(itKey);
         results.put(itCol, val.get());
         
-      } else if(key.getRow().compareTo(itKey.getRow()) > 0) {
+      } else if (key.getRow().compareTo(itKey.getRow()) > 0) {
         break;
       }
     }
@@ -232,15 +232,15 @@ public class HMemcache {
     HStoreKey curKey = new HStoreKey(key.getRow(), key.getColumn(), key.getTimestamp());
     SortedMap<HStoreKey, BytesWritable> tailMap = map.tailMap(curKey);
 
-    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreKey> it = tailMap.keySet().iterator(); it.hasNext();) {
       HStoreKey itKey = it.next();
       
-      if(itKey.matchesRowCol(curKey)) {
+      if (itKey.matchesRowCol(curKey)) {
         result.add(tailMap.get(itKey).get());
         curKey.setVersion(itKey.getTimestamp() - 1);
       }
       
-      if(numVersions > 0 && result.size() >= numVersions) {
+      if (numVersions > 0 && result.size() >= numVersions) {
         break;
       }
     }
@@ -266,7 +266,7 @@ public class HMemcache {
     Iterator<HStoreKey> keyIterators[];
 
     @SuppressWarnings("unchecked")
-      public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
+    public HMemcacheScanner(long timestamp, Text targetCols[], Text firstRow)
       throws IOException {
       
       super(timestamp, targetCols);
@@ -276,7 +276,7 @@ public class HMemcache {
         this.backingMaps = new TreeMap[history.size() + 1];
         int i = 0;
         for(Iterator<TreeMap<HStoreKey, BytesWritable>> it = history.iterator();
-            it.hasNext(); ) {
+            it.hasNext();) {
           
           backingMaps[i++] = it.next();
         }
@@ -290,7 +290,7 @@ public class HMemcache {
 
         HStoreKey firstKey = new HStoreKey(firstRow);
         for(i = 0; i < backingMaps.length; i++) {
-          if(firstRow.getLength() != 0) {
+          if (firstRow.getLength() != 0) {
             keyIterators[i] = backingMaps[i].tailMap(firstKey).keySet().iterator();
             
           } else {
@@ -298,10 +298,10 @@ public class HMemcache {
           }
           
           while(getNext(i)) {
-            if(! findFirstRow(i, firstRow)) {
+            if (!findFirstRow(i, firstRow)) {
               continue;
             }
-            if(columnMatch(i)) {
+            if (columnMatch(i)) {
               break;
             }
           }
@@ -331,7 +331,7 @@ public class HMemcache {
      * @return - true if there is more data available
      */
     boolean getNext(int i) {
-      if(! keyIterators[i].hasNext()) {
+      if (!keyIterators[i].hasNext()) {
         closeSubScanner(i);
         return false;
       }
@@ -350,10 +350,10 @@ public class HMemcache {
 
     /** Shut down map iterators, and release the lock */
     public void close() throws IOException {
-      if(! scannerClosed) {
+      if (!scannerClosed) {
         try {
           for(int i = 0; i < keys.length; i++) {
-            if(keyIterators[i] != null) {
+            if (keyIterators[i] != null) {
               closeSubScanner(i);
             }
           }

+ 102 - 102
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegion.java

@@ -61,21 +61,21 @@ public class HRegion implements HConstants {
     // Make sure that srcA comes first; important for key-ordering during
     // write of the merged file.
     
-    if(srcA.getStartKey() == null) {
-      if(srcB.getStartKey() == null) {
+    if (srcA.getStartKey() == null) {
+      if (srcB.getStartKey() == null) {
         throw new IOException("Cannot merge two regions with null start key");
       }
       // A's start key is null but B's isn't. Assume A comes before B
       
-    } else if((srcB.getStartKey() == null)         // A is not null but B is
-        || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
+    } else if ((srcB.getStartKey() == null)         // A is not null but B is
+               || (srcA.getStartKey().compareTo(srcB.getStartKey()) > 0)) { // A > B
       
       HRegion tmp = srcA;
       srcA = srcB;
       srcB = tmp;
     }
     
-    if (! srcA.getEndKey().equals(srcB.getStartKey())) {
+    if (!srcA.getEndKey().equals(srcB.getStartKey())) {
       throw new IOException("Cannot merge non-adjacent regions");
     }
 
@@ -89,7 +89,7 @@ public class HRegion implements HConstants {
     Text endKey = srcB.getEndKey();
 
     Path merges = new Path(srcA.getRegionDir(), MERGEDIR);
-    if(! fs.exists(merges)) {
+    if (!fs.exists(merges)) {
       fs.mkdirs(merges);
     }
     
@@ -98,14 +98,14 @@ public class HRegion implements HConstants {
     
     Path newRegionDir = HStoreFile.getHRegionDir(merges, newRegionInfo.regionName);
 
-    if(fs.exists(newRegionDir)) {
+    if (fs.exists(newRegionDir)) {
       throw new IOException("Cannot merge; target file collision at " + newRegionDir);
     }
 
     LOG.info("starting merge of regions: " + srcA.getRegionName() + " and " 
-        + srcB.getRegionName() + " new region start key is '" 
-        + (startKey == null ? "" : startKey) + "', end key is '" 
-        + (endKey == null ? "" : endKey) + "'");
+             + srcB.getRegionName() + " new region start key is '" 
+             + (startKey == null ? "" : startKey) + "', end key is '" 
+             + (endKey == null ? "" : endKey) + "'");
     
     // Flush each of the sources, and merge their files into a single 
     // target for each column family.
@@ -114,10 +114,10 @@ public class HRegion implements HConstants {
     
     TreeSet<HStoreFile> alreadyMerged = new TreeSet<HStoreFile>();
     TreeMap<Text, Vector<HStoreFile>> filesToMerge = new TreeMap<Text, Vector<HStoreFile>>();
-    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcA.flushcache(true).iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if(v == null) {
+      if (v == null) {
         v = new Vector<HStoreFile>();
         filesToMerge.put(src.getColFamily(), v);
       }
@@ -126,10 +126,10 @@ public class HRegion implements HConstants {
     
     LOG.debug("flushing and getting file names for region " + srcB.getRegionName());
     
-    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcB.flushcache(true).iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-      if(v == null) {
+      if (v == null) {
         v = new Vector<HStoreFile>();
         filesToMerge.put(src.getColFamily(), v);
       }
@@ -138,11 +138,11 @@ public class HRegion implements HConstants {
     
     LOG.debug("merging stores");
     
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
       Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName, 
-          colFamily, Math.abs(rand.nextLong()));
+                                      colFamily, Math.abs(rand.nextLong()));
       
       dst.mergeStoreFiles(srcFiles, fs, conf);
       alreadyMerged.addAll(srcFiles);
@@ -153,15 +153,15 @@ public class HRegion implements HConstants {
     // of any last-minute inserts
 
     LOG.debug("flushing changes since start of merge for region " 
-        + srcA.getRegionName());
+              + srcA.getRegionName());
 
     filesToMerge.clear();
-    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcA.close().iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       
-      if(! alreadyMerged.contains(src)) {
+      if (!alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if(v == null) {
+        if (v == null) {
           v = new Vector<HStoreFile>();
           filesToMerge.put(src.getColFamily(), v);
         }
@@ -170,14 +170,14 @@ public class HRegion implements HConstants {
     }
     
     LOG.debug("flushing changes since start of merge for region " 
-        + srcB.getRegionName());
+              + srcB.getRegionName());
     
-    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcB.close().iterator(); it.hasNext();) {
       HStoreFile src = it.next();
       
-      if(! alreadyMerged.contains(src)) {
+      if (!alreadyMerged.contains(src)) {
         Vector<HStoreFile> v = filesToMerge.get(src.getColFamily());
-        if(v == null) {
+        if (v == null) {
           v = new Vector<HStoreFile>();
           filesToMerge.put(src.getColFamily(), v);
         }
@@ -187,11 +187,11 @@ public class HRegion implements HConstants {
     
     LOG.debug("merging changes since start of merge");
     
-    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = filesToMerge.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
       Vector<HStoreFile> srcFiles = filesToMerge.get(colFamily);
       HStoreFile dst = new HStoreFile(conf, merges, newRegionInfo.regionName,
-          colFamily, Math.abs(rand.nextLong()));
+                                      colFamily, Math.abs(rand.nextLong()));
       
       dst.mergeStoreFiles(srcFiles, fs, conf);
     }
@@ -199,7 +199,7 @@ public class HRegion implements HConstants {
     // Done
     
     HRegion dstRegion = new HRegion(dir, log, fs, conf, newRegionInfo,
-        newRegionDir, null);
+                                    newRegionDir, null);
 
     // Get rid of merges directory
     
@@ -284,7 +284,7 @@ public class HRegion implements HConstants {
    * written-to before), then read it from the supplied path.
    */
   public HRegion(Path dir, HLog log, FileSystem fs, Configuration conf, 
-      HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
+                 HRegionInfo regionInfo, Path initialFiles, Path oldLogFile) throws IOException {
     
     this.dir = dir;
     this.log = log;
@@ -303,29 +303,29 @@ public class HRegion implements HConstants {
 
     // Move prefab HStore files into place (if any)
     
-    if(initialFiles != null && fs.exists(initialFiles)) {
+    if (initialFiles != null && fs.exists(initialFiles)) {
       fs.rename(initialFiles, regiondir);
     }
 
     // Load in all the HStores.
     
     for(Iterator<Text> it = this.regionInfo.tableDesc.families().iterator();
-        it.hasNext(); ) {
+        it.hasNext();) {
       
       Text colFamily = it.next();
       stores.put(colFamily, new HStore(dir, this.regionInfo.regionName, colFamily, 
-          this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
+                                       this.regionInfo.tableDesc.getMaxVersions(), fs, oldLogFile, conf));
     }
 
     // Get rid of any splits or merges that were lost in-progress
     
     Path splits = new Path(regiondir, SPLITDIR);
-    if(fs.exists(splits)) {
+    if (fs.exists(splits)) {
       fs.delete(splits);
     }
     
     Path merges = new Path(regiondir, MERGEDIR);
-    if(fs.exists(merges)) {
+    if (fs.exists(merges)) {
       fs.delete(merges);
     }
 
@@ -362,7 +362,7 @@ public class HRegion implements HConstants {
   public Vector<HStoreFile> close() throws IOException {
     boolean shouldClose = false;
     synchronized(writestate) {
-      if(writestate.closed) {
+      if (writestate.closed) {
         LOG.info("region " + this.regionInfo.regionName + " closed");
         return new Vector<HStoreFile>();
       }
@@ -376,13 +376,13 @@ public class HRegion implements HConstants {
       shouldClose = true;
     }
 
-    if(! shouldClose) {
+    if (!shouldClose) {
       return null;
       
     } else {
       LOG.info("closing region " + this.regionInfo.regionName);
       Vector<HStoreFile> allHStoreFiles = internalFlushcache();
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
         HStore store = it.next();
         store.close();
       }
@@ -406,8 +406,8 @@ public class HRegion implements HConstants {
    * Returns two brand-new (and open) HRegions
    */
   public HRegion[] closeAndSplit(Text midKey) throws IOException {
-    if(((regionInfo.startKey.getLength() != 0)
-        && (regionInfo.startKey.compareTo(midKey) > 0))
+    if (((regionInfo.startKey.getLength() != 0)
+         && (regionInfo.startKey.compareTo(midKey) > 0))
         || ((regionInfo.endKey.getLength() != 0)
             && (regionInfo.endKey.compareTo(midKey) < 0))) {
       throw new IOException("Region splitkey must lie within region boundaries.");
@@ -419,13 +419,13 @@ public class HRegion implements HConstants {
     // or compactions until close() is called.
     
     Path splits = new Path(regiondir, SPLITDIR);
-    if(! fs.exists(splits)) {
+    if (!fs.exists(splits)) {
       fs.mkdirs(splits);
     }
     
     long regionAId = Math.abs(rand.nextLong());
     HRegionInfo regionAInfo = new HRegionInfo(regionAId, regionInfo.tableDesc, 
-        regionInfo.startKey, midKey);
+                                              regionInfo.startKey, midKey);
         
     long regionBId = Math.abs(rand.nextLong());
     HRegionInfo regionBInfo
@@ -434,24 +434,24 @@ public class HRegion implements HConstants {
     Path dirA = HStoreFile.getHRegionDir(splits, regionAInfo.regionName);
     Path dirB = HStoreFile.getHRegionDir(splits, regionBInfo.regionName);
 
-    if(fs.exists(dirA) || fs.exists(dirB)) {
+    if (fs.exists(dirA) || fs.exists(dirB)) {
       throw new IOException("Cannot split; target file collision at " + dirA 
-          + " or " + dirB);
+                            + " or " + dirB);
     }
     
     TreeSet<HStoreFile> alreadySplit = new TreeSet<HStoreFile>();
     Vector<HStoreFile> hstoreFilesToSplit = flushcache(true);
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       
       LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
-          + "/" + hsf.fileId());
+                + "/" + hsf.fileId());
 
       HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                       hsf.getColFamily(), Math.abs(rand.nextLong()));
       
       HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-          hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                       hsf.getColFamily(), Math.abs(rand.nextLong()));
       
       hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       alreadySplit.add(hsf);
@@ -461,18 +461,18 @@ public class HRegion implements HConstants {
     // and copy the small remainder
     
     hstoreFilesToSplit = close();
-    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFilesToSplit.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       
-      if(! alreadySplit.contains(hsf)) {
+      if (!alreadySplit.contains(hsf)) {
         LOG.debug("splitting HStore " + hsf.getRegionName() + "/" + hsf.getColFamily()
-            + "/" + hsf.fileId());
+                  + "/" + hsf.fileId());
 
         HStoreFile dstA = new HStoreFile(conf, splits, regionAInfo.regionName, 
-            hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                         hsf.getColFamily(), Math.abs(rand.nextLong()));
         
         HStoreFile dstB = new HStoreFile(conf, splits, regionBInfo.regionName, 
-            hsf.getColFamily(), Math.abs(rand.nextLong()));
+                                         hsf.getColFamily(), Math.abs(rand.nextLong()));
         
         hsf.splitStoreFile(midKey, dstA, dstB, fs, conf);
       }
@@ -494,7 +494,7 @@ public class HRegion implements HConstants {
     regions[1] = regionB;
     
     LOG.info("region split complete. new regions are: " + regions[0].getRegionName()
-        + ", " + regions[1].getRegionName());
+             + ", " + regions[1].getRegionName());
     
     return regions;
   }
@@ -565,10 +565,10 @@ public class HRegion implements HConstants {
     Text key = new Text();
     long maxSize = 0;
 
-    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext(); ) {
+    for(Iterator<HStore> i = stores.values().iterator(); i.hasNext();) {
       long size = i.next().getLargestFileSize(key);
       
-      if(size > maxSize) {                      // Largest so far
+      if (size > maxSize) {                      // Largest so far
         maxSize = size;
         midKey.set(key);
       }
@@ -593,9 +593,9 @@ public class HRegion implements HConstants {
   public boolean compactStores() throws IOException {
     boolean shouldCompact = false;
     synchronized(writestate) {
-      if((! writestate.writesOngoing)
+      if ((!writestate.writesOngoing)
           && writestate.writesEnabled
-          && (! writestate.closed)
+          && (!writestate.closed)
           && recentCommits > MIN_COMMITS_FOR_COMPACTION) {
         
         writestate.writesOngoing = true;
@@ -603,14 +603,14 @@ public class HRegion implements HConstants {
       }
     }
 
-    if(! shouldCompact) {
+    if (!shouldCompact) {
       LOG.info("not compacting region " + this.regionInfo.regionName);
       return false;
       
     } else {
       try {
         LOG.info("starting compaction on region " + this.regionInfo.regionName);
-        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+        for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
           HStore store = it.next();
           store.compact();
         }
@@ -632,7 +632,7 @@ public class HRegion implements HConstants {
    * only take if there have been a lot of uncommitted writes.
    */
   public void optionallyFlush() throws IOException {
-    if(commitsSinceFlush > maxUnflushedEntries) {
+    if (commitsSinceFlush > maxUnflushedEntries) {
       flushcache(false);
     }
   }
@@ -657,20 +657,20 @@ public class HRegion implements HConstants {
   public Vector<HStoreFile> flushcache(boolean disableFutureWrites) throws IOException {
     boolean shouldFlush = false;
     synchronized(writestate) {
-      if((! writestate.writesOngoing)
+      if ((!writestate.writesOngoing)
           && writestate.writesEnabled
-          && (! writestate.closed)) {
+          && (!writestate.closed)) {
         
         writestate.writesOngoing = true;
         shouldFlush = true;
         
-        if(disableFutureWrites) {
+        if (disableFutureWrites) {
           writestate.writesEnabled = false;
         }
       }
     }
     
-    if(! shouldFlush) {
+    if (!shouldFlush) {
       LOG.debug("not flushing cache for region " + this.regionInfo.regionName);
       return null;
       
@@ -731,8 +731,8 @@ public class HRegion implements HConstants {
     
     HMemcache.Snapshot retval = memcache.snapshotMemcacheForLog(log);
     TreeMap<HStoreKey, BytesWritable> memcacheSnapshot = retval.memcacheSnapshot;
-    if(memcacheSnapshot == null) {
-      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+    if (memcacheSnapshot == null) {
+      for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
         HStore hstore = it.next();
         Vector<HStoreFile> hstoreFiles = hstore.getAllMapFiles();
         allHStoreFiles.addAll(0, hstoreFiles);
@@ -746,7 +746,7 @@ public class HRegion implements HConstants {
     
     LOG.debug("flushing memcache to HStores");
     
-    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext(); ) {
+    for(Iterator<HStore> it = stores.values().iterator(); it.hasNext();) {
       HStore hstore = it.next();
       Vector<HStoreFile> hstoreFiles 
         = hstore.flushCache(memcacheSnapshot, logCacheFlushId);
@@ -762,7 +762,7 @@ public class HRegion implements HConstants {
     LOG.debug("writing flush cache complete to log");
     
     log.completeCacheFlush(this.regionInfo.regionName,
-        regionInfo.tableDesc.getName(), logCacheFlushId);
+                           regionInfo.tableDesc.getName(), logCacheFlushId);
 
     // C. Delete the now-irrelevant memcache snapshot; its contents have been 
     //    dumped to disk-based HStores.
@@ -784,7 +784,7 @@ public class HRegion implements HConstants {
   /** Fetch a single data item. */
   public byte[] get(Text row, Text column) throws IOException {
     byte results[][] = get(row, column, Long.MAX_VALUE, 1);
-    if(results == null) {
+    if (results == null) {
       return null;
       
     } else {
@@ -799,9 +799,9 @@ public class HRegion implements HConstants {
 
   /** Fetch multiple versions of a single data item, with timestamp. */
   public byte[][] get(Text row, Text column, long timestamp, int numVersions) 
-      throws IOException {
+    throws IOException {
     
-    if(writestate.closed) {
+    if (writestate.closed) {
       throw new IOException("HRegion is closed.");
     }
 
@@ -830,7 +830,7 @@ public class HRegion implements HConstants {
     // Check the memcache
 
     byte[][] result = memcache.get(key, numVersions);
-    if(result != null) {
+    if (result != null) {
       return result;
     }
 
@@ -838,7 +838,7 @@ public class HRegion implements HConstants {
 
     Text colFamily = HStoreKey.extractFamily(key.getColumn());
     HStore targetStore = stores.get(colFamily);
-    if(targetStore == null) {
+    if (targetStore == null) {
       return null;
     }
     
@@ -859,7 +859,7 @@ public class HRegion implements HConstants {
     HStoreKey key = new HStoreKey(row, System.currentTimeMillis());
 
     TreeMap<Text, byte[]> memResult = memcache.getFull(key);
-    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = stores.keySet().iterator(); it.hasNext();) {
       Text colFamily = it.next();
       HStore targetStore = stores.get(colFamily);
       targetStore.getFull(key, memResult);
@@ -879,7 +879,7 @@ public class HRegion implements HConstants {
 
     HStore storelist[] = new HStore[families.size()];
     int i = 0;
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
       Text family = it.next();
       storelist[i++] = stores.get(family);
     }
@@ -918,16 +918,16 @@ public class HRegion implements HConstants {
    * method.
    */
   public void put(long lockid, Text targetCol, byte[] val) throws IOException {
-    if(val.length == HStoreKey.DELETE_BYTES.length) {
+    if (val.length == HStoreKey.DELETE_BYTES.length) {
       boolean matches = true;
       for(int i = 0; i < val.length; i++) {
-        if(val[i] != HStoreKey.DELETE_BYTES[i]) {
+        if (val[i] != HStoreKey.DELETE_BYTES[i]) {
           matches = false;
           break;
         }
       }
       
-      if(matches) {
+      if (matches) {
         throw new IOException("Cannot insert value: " + val);
       }
     }
@@ -951,7 +951,7 @@ public class HRegion implements HConstants {
    */
   void localput(long lockid, Text targetCol, byte[] val) throws IOException {
     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
     }
 
@@ -964,13 +964,13 @@ public class HRegion implements HConstants {
       // This check makes sure that another thread from the client
       // hasn't aborted/committed the write-operation.
 
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new IOException("Locking error: put operation on lock " + lockid 
-            + " unexpected aborted by another thread");
+                              + " unexpectedly aborted by another thread");
       }
       
       TreeMap<Text, byte[]> targets = targetColumns.get(lockid);
-      if(targets == null) {
+      if (targets == null) {
         targets = new TreeMap<Text, byte[]>();
         targetColumns.put(lockid, targets);
       }
@@ -985,7 +985,7 @@ public class HRegion implements HConstants {
    */
   public void abort(long lockid) throws IOException {
     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
     }
     
@@ -998,9 +998,9 @@ public class HRegion implements HConstants {
       // This check makes sure another thread from the client
       // hasn't aborted/committed the write-operation.
       
-      if(row != getRowFromLock(lockid)) {
+      if (row != getRowFromLock(lockid)) {
         throw new IOException("Locking error: abort() operation on lock " 
-            + lockid + " unexpected aborted by another thread");
+                              + lockid + " unexpectedly aborted by another thread");
       }
       
       targetColumns.remove(lockid);
@@ -1021,7 +1021,7 @@ public class HRegion implements HConstants {
     // that repeated executions won't screw this up.
     
     Text row = getRowFromLock(lockid);
-    if(row == null) {
+    if (row == null) {
       throw new IOException("No write lock for lockid " + lockid);
     }
     
@@ -1035,7 +1035,7 @@ public class HRegion implements HConstants {
 
       long commitTimestamp = System.currentTimeMillis();
       log.append(regionInfo.regionName, regionInfo.tableDesc.getName(), row, 
-          targetColumns.get(lockid), commitTimestamp);
+                 targetColumns.get(lockid), commitTimestamp);
       
       memcache.add(row, targetColumns.get(lockid), commitTimestamp);
 
@@ -1054,25 +1054,25 @@ public class HRegion implements HConstants {
 
   /** Make sure this is a valid row for the HRegion */
   void checkRow(Text row) throws IOException {
-    if(((regionInfo.startKey.getLength() == 0)
-        || (regionInfo.startKey.compareTo(row) <= 0))
+    if (((regionInfo.startKey.getLength() == 0)
+         || (regionInfo.startKey.compareTo(row) <= 0))
         && ((regionInfo.endKey.getLength() == 0)
             || (regionInfo.endKey.compareTo(row) > 0))) {
       // all's well
       
     } else {
       throw new IOException("Requested row out of range for HRegion "
-          + regionInfo.regionName + ", startKey='" + regionInfo.startKey
-          + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
+                            + regionInfo.regionName + ", startKey='" + regionInfo.startKey
+                            + "', endKey='" + regionInfo.endKey + "', row='" + row + "'");
     }
   }
 
   /** Make sure this is a valid column for the current table */
   void checkFamily(Text family) throws IOException {
-    if(! regionInfo.tableDesc.hasFamily(family)) {
+    if (!regionInfo.tableDesc.hasFamily(family)) {
       throw new IOException("Requested column family " + family 
-          + " does not exist in HRegion " + regionInfo.regionName
-          + " for table " + regionInfo.tableDesc.getName());
+                            + " does not exist in HRegion " + regionInfo.regionName
+                            + " for table " + regionInfo.tableDesc.getName());
     }
   }
 
@@ -1150,7 +1150,7 @@ public class HRegion implements HConstants {
         keys[i] = new HStoreKey();
         resultSets[i] = new TreeMap<Text, byte[]>();
 
-        if(! scanners[i].next(keys[i], resultSets[i])) {
+        if (!scanners[i].next(keys[i], resultSets[i])) {
           closeScanner(i);
         }
       }
@@ -1167,7 +1167,7 @@ public class HRegion implements HConstants {
       Text chosenRow = null;
       long chosenTimestamp = -1;
       for(int i = 0; i < keys.length; i++) {
-        if(scanners[i] != null
+        if (scanners[i] != null
             && (chosenRow == null
                 || (keys[i].getRow().compareTo(chosenRow) < 0)
                 || ((keys[i].getRow().compareTo(chosenRow) == 0)
@@ -1181,21 +1181,21 @@ public class HRegion implements HConstants {
       // Store the key and results for each sub-scanner. Merge them as appropriate.
       
       boolean insertedItem = false;
-      if(chosenTimestamp > 0) {
+      if (chosenTimestamp > 0) {
         key.setRow(chosenRow);
         key.setVersion(chosenTimestamp);
         key.setColumn(new Text(""));
 
         for(int i = 0; i < scanners.length; i++) {        
           while((scanners[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) == 0)
-              && (keys[i].getTimestamp() == chosenTimestamp)) {
+                && (keys[i].getRow().compareTo(chosenRow) == 0)
+                && (keys[i].getTimestamp() == chosenTimestamp)) {
             
             results.putAll(resultSets[i]);
             insertedItem = true;
 
             resultSets[i].clear();
-            if(! scanners[i].next(keys[i], resultSets[i])) {
+            if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
             }
           }
@@ -1204,10 +1204,10 @@ public class HRegion implements HConstants {
           // row label, then its timestamp is bad.  We need to advance it.
 
           while((scanners[i] != null)
-              && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
+                && (keys[i].getRow().compareTo(chosenRow) <= 0)) {
             
             resultSets[i].clear();
-            if(! scanners[i].next(keys[i], resultSets[i])) {
+            if (!scanners[i].next(keys[i], resultSets[i])) {
               closeScanner(i);
             }
           }
@@ -1231,7 +1231,7 @@ public class HRegion implements HConstants {
     /** All done with the scanner. */
     public void close() throws IOException {
       for(int i = 0; i < scanners.length; i++) {
-        if(scanners[i] != null) {
+        if (scanners[i] != null) {
           closeScanner(i);
         }
       }

+ 3 - 3
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionInfo.java

@@ -42,19 +42,19 @@ public class HRegionInfo implements Writable {
     
     this.regionId = regionId;
     
-    if(tableDesc == null) {
+    if (tableDesc == null) {
       throw new IllegalArgumentException("tableDesc cannot be null");
     }
     
     this.tableDesc = tableDesc;
     
     this.startKey = new Text();
-    if(startKey != null) {
+    if (startKey != null) {
       this.startKey.set(startKey);
     }
     
     this.endKey = new Text();
-    if(endKey != null) {
+    if (endKey != null) {
       this.endKey.set(endKey);
     }
     

+ 34 - 34
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HRegionServer.java

@@ -61,7 +61,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     }
     
     public void run() {
-      while(! stopRequested) {
+      while(!stopRequested) {
         long startTime = System.currentTimeMillis();
 
         // Grab a list of regions to check
@@ -78,12 +78,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
         // Check to see if they need splitting
 
         Vector<SplitRegion> toSplit = new Vector<SplitRegion>();
-        for(Iterator<HRegion> it = checkSplit.iterator(); it.hasNext(); ) {
+        for(Iterator<HRegion> it = checkSplit.iterator(); it.hasNext();) {
           HRegion cur = it.next();
           Text midKey = new Text();
           
           try {
-            if(cur.needsSplit(midKey)) {
+            if (cur.needsSplit(midKey)) {
               toSplit.add(new SplitRegion(cur, midKey));
             }
             
@@ -92,7 +92,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
           }
         }
 
-        for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext(); ) {
+        for(Iterator<SplitRegion> it = toSplit.iterator(); it.hasNext();) {
           SplitRegion r = it.next();
           
           locking.obtainWriteLock();
@@ -161,7 +161,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private Thread cacheFlusherThread;
   private class Flusher implements Runnable {
     public void run() {
-      while(! stopRequested) {
+      while(!stopRequested) {
         long startTime = System.currentTimeMillis();
 
         // Grab a list of items to flush
@@ -177,7 +177,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
         // Flush them, if necessary
 
-        for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext(); ) {
+        for(Iterator<HRegion> it = toFlush.iterator(); it.hasNext();) {
           HRegion cur = it.next();
           
           try {
@@ -212,12 +212,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private Thread logRollerThread;
   private class LogRoller implements Runnable {
     public void run() {
-      while(! stopRequested) {
+      while(!stopRequested) {
 
         // If the number of log entries is high enough, roll the log.  This is a
         // very fast operation, but should not be done too frequently.
 
-        if(log.getNumEntries() > maxLogEntries) {
+        if (log.getNumEntries() > maxLogEntries) {
           try {
             log.rollWriter();
             
@@ -334,7 +334,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * processing to cease.
    */
   public void stop() throws IOException {
-    if(! stopRequested) {
+    if (!stopRequested) {
       stopRequested = true;
  
       closeAllRegions();
@@ -375,7 +375,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
    * load/unload instructions.
    */
   public void run() {
-    while(! stopRequested) {
+    while(!stopRequested) {
       HServerInfo info = new HServerInfo(address, rand.nextLong());
       long lastMsg = 0;
       long waitTime;
@@ -398,8 +398,8 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
       
       // Now ask the master what it wants us to do and tell it what we have done.
       
-      while(! stopRequested) {
-        if((System.currentTimeMillis() - lastMsg) >= msgInterval) {
+      while(!stopRequested) {
+        if ((System.currentTimeMillis() - lastMsg) >= msgInterval) {
 
           HMsg outboundArray[] = null;
           synchronized(outboundMsgs) {
@@ -413,7 +413,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
 
             // Process the HMaster's instruction stream
 
-            if(! processMessages(msgs)) {
+            if (!processMessages(msgs)) {
               break;
             }
 
@@ -529,10 +529,10 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
       HRegion region = regions.remove(info.regionName);
       
-      if(region != null) {
+      if (region != null) {
         region.close();
         
-        if(reportWhenCompleted) {
+        if (reportWhenCompleted) {
           reportClose(region);
         }
       }
@@ -548,7 +548,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     try {
       HRegion region = regions.remove(info.regionName);
   
-      if(region != null) {
+      if (region != null) {
         region.closeAndDelete();
       }
   
@@ -561,7 +561,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   private void closeAllRegions() throws IOException {
     locking.obtainWriteLock();
     try {
-      for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HRegion> it = regions.values().iterator(); it.hasNext();) {
         HRegion region = it.next();
         region.close();
       }
@@ -606,7 +606,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Obtain a table descriptor for the given region */
   public HRegionInfo getRegionInfo(Text regionName) {
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       return null;
     }
     return region.getRegionInfo();
@@ -617,7 +617,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                                        Text firstRow) throws IOException {
 
     HRegion r = getRegion(regionName);
-    if(r == null) {
+    if (r == null) {
       throw new IOException("Not serving region " + regionName);
     }
     return r.getScanner(cols, firstRow);
@@ -626,12 +626,12 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Get the indicated row/column */
   public BytesWritable get(Text regionName, Text row, Text column) throws IOException {
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
     byte results[] = region.get(row, column);
-    if(results != null) {
+    if (results != null) {
       return new BytesWritable(results);
     }
     return null;
@@ -642,15 +642,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                              int numVersions) throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
     byte results[][] = region.get(row, column, numVersions);
-    if(results != null) {
+    if (results != null) {
       BytesWritable realResults[] = new BytesWritable[results.length];
       for(int i = 0; i < realResults.length; i++) {
-        if(results[i] != null) {
+        if (results[i] != null) {
           realResults[i] = new BytesWritable(results[i]);
         }
       }
@@ -664,15 +664,15 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                              long timestamp, int numVersions) throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
     byte results[][] = region.get(row, column, timestamp, numVersions);
-    if(results != null) {
+    if (results != null) {
       BytesWritable realResults[] = new BytesWritable[results.length];
       for(int i = 0; i < realResults.length; i++) {
-        if(results[i] != null) {
+        if (results[i] != null) {
           realResults[i] = new BytesWritable(results[i]);
         }
       }
@@ -684,14 +684,14 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
   /** Get all the columns (along with their names) for a given row. */
   public LabelledData[] getRow(Text regionName, Text row) throws IOException {
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
     TreeMap<Text, byte[]> map = region.getFull(row);
     LabelledData result[] = new LabelledData[map.size()];
     int counter = 0;
-    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = map.keySet().iterator(); it.hasNext();) {
       Text colname = it.next();
       byte val[] = map.get(colname);
       result[counter++] = new LabelledData(colname, val);
@@ -726,7 +726,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
@@ -743,7 +743,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
                   BytesWritable val) throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
@@ -758,7 +758,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
@@ -773,7 +773,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     
@@ -788,7 +788,7 @@ public class HRegionServer implements HConstants, HRegionInterface, Runnable {
     throws IOException {
     
     HRegion region = getRegion(regionName);
-    if(region == null) {
+    if (region == null) {
       throw new IOException("Not serving region " + regionName);
     }
     

+ 3 - 3
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HServerAddress.java

@@ -35,7 +35,7 @@ public class HServerAddress implements Writable {
   
   public HServerAddress(String hostAndPort) {
     int colonIndex = hostAndPort.indexOf(':');
-    if(colonIndex < 0) {
+    if (colonIndex < 0) {
       throw new IllegalArgumentException("Not a host:port pair: " + hostAndPort);
     }
     String host = hostAndPort.substring(0, colonIndex);
@@ -80,7 +80,7 @@ public class HServerAddress implements Writable {
     String bindAddress = in.readUTF();
     int port = in.readInt();
     
-    if(bindAddress == null || bindAddress.length() == 0) {
+    if (bindAddress == null || bindAddress.length() == 0) {
       address = null;
       stringValue = null;
       
@@ -91,7 +91,7 @@ public class HServerAddress implements Writable {
   }
 
   public void write(DataOutput out) throws IOException {
-    if(address == null) {
+    if (address == null) {
       out.writeUTF("");
       out.writeInt(0);
       

+ 59 - 59
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStore.java

@@ -110,7 +110,7 @@ public class HStore {
 
     this.compactdir = new Path(dir, COMPACTION_DIR);
     Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
-    if(fs.exists(curCompactStore)) {
+    if (fs.exists(curCompactStore)) {
       processReadyCompaction();
       fs.delete(curCompactStore);
     }
@@ -123,7 +123,7 @@ public class HStore {
     Vector<HStoreFile> hstoreFiles 
       = HStoreFile.loadHStoreFiles(conf, dir, regionName, colFamily, fs);
     
-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       mapFiles.put(hsf.loadInfo(fs), hsf);
     }
@@ -138,11 +138,11 @@ public class HStore {
     // contain any updates also contained in the log.
 
     long maxSeqID = -1;
-    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = hstoreFiles.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       long seqid = hsf.loadInfo(fs);
-      if(seqid > 0) {
-        if(seqid > maxSeqID) {
+      if (seqid > 0) {
+        if (seqid > maxSeqID) {
           maxSeqID = seqid;
         }
       }
@@ -157,7 +157,7 @@ public class HStore {
 
     LOG.debug("reading reconstructionLog");
     
-    if(reconstructionLog != null && fs.exists(reconstructionLog)) {
+    if (reconstructionLog != null && fs.exists(reconstructionLog)) {
       long maxSeqIdInLog = -1;
       TreeMap<HStoreKey, BytesWritable> reconstructedCache 
         = new TreeMap<HStoreKey, BytesWritable>();
@@ -170,7 +170,7 @@ public class HStore {
         HLogEdit val = new HLogEdit();
         while(login.next(key, val)) {
           maxSeqIdInLog = Math.max(maxSeqIdInLog, key.getLogSeqNum());
-          if(key.getLogSeqNum() <= maxSeqID) {
+          if (key.getLogSeqNum() <= maxSeqID) {
             continue;
           }
           reconstructedCache.put(new HStoreKey(key.getRow(), val.getColumn(), 
@@ -181,7 +181,7 @@ public class HStore {
         login.close();
       }
 
-      if(reconstructedCache.size() > 0) {
+      if (reconstructedCache.size() > 0) {
         
         // We create a "virtual flush" at maxSeqIdInLog+1.
         
@@ -195,7 +195,7 @@ public class HStore {
     // should be "timeless"; that is, it should not have an associated seq-ID, 
     // because all log messages have been reflected in the TreeMaps at this point.
     
-    if(mapFiles.size() >= 1) {
+    if (mapFiles.size() >= 1) {
       compactHelper(true);
     }
 
@@ -204,7 +204,7 @@ public class HStore {
 
     LOG.debug("starting map readers");
     
-    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext(); ) {
+    for(Iterator<Long> it = mapFiles.keySet().iterator(); it.hasNext();) {
       Long key = it.next().longValue();
       HStoreFile hsf = mapFiles.get(key);
 
@@ -222,7 +222,7 @@ public class HStore {
     LOG.info("closing HStore for " + this.regionName + "/" + this.colFamily);
     
     try {
-      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
+      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext();) {
         MapFile.Reader map = it.next();
         map.close();
       }
@@ -273,9 +273,9 @@ public class HStore {
                                               HStoreKey.class, BytesWritable.class);
       
       try {
-        for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<HStoreKey> it = inputCache.keySet().iterator(); it.hasNext();) {
           HStoreKey curkey = it.next();
-          if(this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
+          if (this.colFamily.equals(HStoreKey.extractFamily(curkey.getColumn()))) {
             BytesWritable val = inputCache.get(curkey);
             out.append(curkey, val);
           }
@@ -294,7 +294,7 @@ public class HStore {
 
       // C. Finally, make the new MapFile available.
 
-      if(addToAvailableMaps) {
+      if (addToAvailableMaps) {
         locking.obtainWriteLock();
         
         try {
@@ -312,7 +312,7 @@ public class HStore {
 
   public Vector<HStoreFile> getAllMapFiles() {
     Vector<HStoreFile> flushedFiles = new Vector<HStoreFile>();
-    for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       flushedFiles.add(hsf);
     }
@@ -366,11 +366,11 @@ public class HStore {
         // Compute the max-sequenceID seen in any of the to-be-compacted TreeMaps
 
         long maxSeenSeqID = -1;
-        for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+        for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
           HStoreFile hsf = it.next();
           long seqid = hsf.loadInfo(fs);
-          if(seqid > 0) {
-            if(seqid > maxSeenSeqID) {
+          if (seqid > 0) {
+            if (seqid > maxSeenSeqID) {
               maxSeenSeqID = seqid;
             }
           }
@@ -380,11 +380,11 @@ public class HStore {
         HStoreFile compactedOutputFile 
           = new HStoreFile(conf, compactdir, regionName, colFamily, -1);
         
-        if(toCompactFiles.size() == 1) {
+        if (toCompactFiles.size() == 1) {
           LOG.debug("nothing to compact for " + this.regionName + "/" + this.colFamily);
           
           HStoreFile hsf = toCompactFiles.elementAt(0);
-          if(hsf.loadInfo(fs) == -1) {
+          if (hsf.loadInfo(fs) == -1) {
             return;
           }
         }
@@ -414,7 +414,7 @@ public class HStore {
           BytesWritable[] vals = new BytesWritable[toCompactFiles.size()];
           boolean[] done = new boolean[toCompactFiles.size()];
           int pos = 0;
-          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
             HStoreFile hsf = it.next();
             readers[pos] = new MapFile.Reader(fs, hsf.getMapFilePath().toString(), conf);
             keys[pos] = new HStoreKey();
@@ -431,8 +431,8 @@ public class HStore {
           int numDone = 0;
           for(int i = 0; i < readers.length; i++) {
             readers[i].reset();
-            done[i] = ! readers[i].next(keys[i], vals[i]);
-            if(done[i]) {
+            done[i] = !readers[i].next(keys[i], vals[i]);
+            if (done[i]) {
               numDone++;
             }
           }
@@ -446,15 +446,15 @@ public class HStore {
 
             int smallestKey = -1;
             for(int i = 0; i < readers.length; i++) {
-              if(done[i]) {
+              if (done[i]) {
                 continue;
               }
               
-              if(smallestKey < 0) {
+              if (smallestKey < 0) {
                 smallestKey = i;
               
               } else {
-                if(keys[i].compareTo(keys[smallestKey]) < 0) {
+                if (keys[i].compareTo(keys[smallestKey]) < 0) {
                   smallestKey = i;
                 }
               }
@@ -463,8 +463,8 @@ public class HStore {
             // Reflect the current key/val in the output
 
             HStoreKey sk = keys[smallestKey];
-            if(lastRow.equals(sk.getRow())
-               && lastColumn.equals(sk.getColumn())) {
+            if (lastRow.equals(sk.getRow())
+                && lastColumn.equals(sk.getColumn())) {
               
               timesSeen++;
               
@@ -472,13 +472,13 @@ public class HStore {
               timesSeen = 1;
             }
             
-            if(timesSeen <= maxVersions) {
+            if (timesSeen <= maxVersions) {
 
               // Keep old versions until we have maxVersions worth.
               // Then just skip them.
 
-              if(sk.getRow().getLength() != 0
-                 && sk.getColumn().getLength() != 0) {
+              if (sk.getRow().getLength() != 0
+                  && sk.getColumn().getLength() != 0) {
                 
                 // Only write out objects which have a non-zero length key and value
 
@@ -499,7 +499,7 @@ public class HStore {
             // Advance the smallest key.  If that reader's all finished, then 
             // mark it as done.
 
-            if(! readers[smallestKey].next(keys[smallestKey], vals[smallestKey])) {
+            if (!readers[smallestKey].next(keys[smallestKey], vals[smallestKey])) {
               done[smallestKey] = true;
               readers[smallestKey].close();
               numDone++;
@@ -516,7 +516,7 @@ public class HStore {
 
         // Now, write out an HSTORE_LOGINFOFILE for the brand-new TreeMap.
 
-        if((! deleteSequenceInfo) && maxSeenSeqID >= 0) {
+        if ((!deleteSequenceInfo) && maxSeenSeqID >= 0) {
           compactedOutputFile.writeInfo(fs, maxSeenSeqID);
           
         } else {
@@ -529,7 +529,7 @@ public class HStore {
         DataOutputStream out = new DataOutputStream(fs.create(filesToReplace));
         try {
           out.writeInt(toCompactFiles.size());
-          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+          for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
             HStoreFile hsf = it.next();
             hsf.write(out);
           }
@@ -587,7 +587,7 @@ public class HStore {
     Path curCompactStore = HStoreFile.getHStoreDir(compactdir, regionName, colFamily);
     try {
       Path doneFile = new Path(curCompactStore, COMPACTION_DONE);
-      if(! fs.exists(doneFile)) {
+      if (!fs.exists(doneFile)) {
         
         // The last execution didn't finish the compaction, so there's nothing 
         // we can do.  We'll just have to redo it. Abandon it and return.
@@ -622,18 +622,18 @@ public class HStore {
       // 3. Unload all the replaced MapFiles.
       
       Iterator<HStoreFile> it2 = mapFiles.values().iterator();
-      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext(); ) {
+      for(Iterator<MapFile.Reader> it = maps.values().iterator(); it.hasNext();) {
         MapFile.Reader curReader = it.next();
         HStoreFile curMapFile = it2.next();
-        if(toCompactFiles.contains(curMapFile)) {
+        if (toCompactFiles.contains(curMapFile)) {
           curReader.close();
           it.remove();
         }
       }
       
-      for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
+      for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
         HStoreFile curMapFile = it.next();
-        if(toCompactFiles.contains(curMapFile)) {
+        if (toCompactFiles.contains(curMapFile)) {
           it.remove();
         }
       }
@@ -645,7 +645,7 @@ public class HStore {
 
       // 4. Delete all the old files, no longer needed
       
-      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext(); ) {
+      for(Iterator<HStoreFile> it = toCompactFiles.iterator(); it.hasNext();) {
         HStoreFile hsf = it.next();
         fs.delete(hsf.getMapFilePath());
         fs.delete(hsf.getInfoFilePath());
@@ -720,12 +720,12 @@ public class HStore {
           
           do {
             Text readcol = readkey.getColumn();
-            if(results.get(readcol) == null
-               && key.matchesWithoutColumn(readkey)) {
+            if (results.get(readcol) == null
+                && key.matchesWithoutColumn(readkey)) {
               results.put(new Text(readcol), readval.get());
               readval = new BytesWritable();
               
-            } else if(key.getRow().compareTo(readkey.getRow()) > 0) {
+            } else if (key.getRow().compareTo(readkey.getRow()) > 0) {
               break;
             }
             
@@ -745,7 +745,7 @@ public class HStore {
    * If 'numVersions' is negative, the method returns all available versions.
    */
   public byte[][] get(HStoreKey key, int numVersions) throws IOException {
-    if(numVersions == 0) {
+    if (numVersions == 0) {
       throw new IllegalArgumentException("Must request at least one value.");
     }
     
@@ -763,12 +763,12 @@ public class HStore {
           map.reset();
           HStoreKey readkey = (HStoreKey)map.getClosest(key, readval);
           
-          if(readkey.matchesRowCol(key)) {
+          if (readkey.matchesRowCol(key)) {
             results.add(readval.get());
             readval = new BytesWritable();
 
             while(map.next(readkey, readval) && readkey.matchesRowCol(key)) {
-              if(numVersions > 0 && (results.size() >= numVersions)) {
+              if (numVersions > 0 && (results.size() >= numVersions)) {
                 break;
                 
               } else {
@@ -778,12 +778,12 @@ public class HStore {
             }
           }
         }
-        if(results.size() >= numVersions) {
+        if (results.size() >= numVersions) {
           break;
         }
       }
 
-      if(results.size() == 0) {
+      if (results.size() == 0) {
         return null;
         
       } else {
@@ -809,13 +809,13 @@ public class HStore {
     // Iterate through all the MapFiles
     
     for(Iterator<Map.Entry<Long, HStoreFile>> it = mapFiles.entrySet().iterator();
-        it.hasNext(); ) {
+        it.hasNext();) {
       
       Map.Entry<Long, HStoreFile> e = it.next();
       HStoreFile curHSF = e.getValue();
       long size = fs.getLength(new Path(curHSF.getMapFilePath(), MapFile.DATA_FILE_NAME));
       
-      if(size > maxSize) {              // This is the largest one so far
+      if (size > maxSize) {              // This is the largest one so far
         maxSize = size;
         mapIndex = e.getKey();
       }
@@ -871,7 +871,7 @@ public class HStore {
       try {
         this.readers = new MapFile.Reader[mapFiles.size()];
         int i = 0;
-        for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext(); ) {
+        for(Iterator<HStoreFile> it = mapFiles.values().iterator(); it.hasNext();) {
           HStoreFile curHSF = it.next();
           readers[i++] = new MapFile.Reader(fs, curHSF.getMapFilePath().toString(), conf);
         }
@@ -885,14 +885,14 @@ public class HStore {
           keys[i] = new HStoreKey();
           vals[i] = new BytesWritable();
 
-          if(firstRow.getLength() != 0) {
-            if(findFirstRow(i, firstRow)) {
+          if (firstRow.getLength() != 0) {
+            if (findFirstRow(i, firstRow)) {
               continue;
             }
           }
           
           while(getNext(i)) {
-            if(columnMatch(i)) {
+            if (columnMatch(i)) {
               break;
             }
           }
@@ -915,7 +915,7 @@ public class HStore {
       HStoreKey firstKey
         = (HStoreKey)readers[i].getClosest(new HStoreKey(firstRow), vals[i]);
       
-      if(firstKey == null) {
+      if (firstKey == null) {
         
         // Didn't find it. Close the scanner and return TRUE
         
@@ -935,7 +935,7 @@ public class HStore {
      * @return - true if there is more data available
      */
     boolean getNext(int i) throws IOException {
-      if(! readers[i].next(keys[i], vals[i])) {
+      if (!readers[i].next(keys[i], vals[i])) {
         closeSubScanner(i);
         return false;
       }
@@ -945,7 +945,7 @@ public class HStore {
     /** Close down the indicated reader. */
     void closeSubScanner(int i) throws IOException {
       try {
-        if(readers[i] != null) {
+        if (readers[i] != null) {
           readers[i].close();
         }
         
@@ -958,10 +958,10 @@ public class HStore {
 
     /** Shut it down! */
     public void close() throws IOException {
-      if(! scannerClosed) {
+      if (!scannerClosed) {
         try {
           for(int i = 0; i < readers.length; i++) {
-            if(readers[i] != null) {
+            if (readers[i] != null) {
               readers[i].close();
             }
           }

+ 14 - 14
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreFile.java

@@ -158,13 +158,13 @@ public class HStoreFile implements HConstants, WritableComparable {
     for(int i = 0; i < datfiles.length; i++) {
       String name = datfiles[i].getName();
       
-      if(name.startsWith(HSTORE_DATFILE_PREFIX)) {
+      if (name.startsWith(HSTORE_DATFILE_PREFIX)) {
         Long fileId = Long.parseLong(name.substring(HSTORE_DATFILE_PREFIX.length()));
         HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
         Path mapfile = curfile.getMapFilePath();
         Path infofile = curfile.getInfoFilePath();
         
-        if(fs.exists(infofile)) {
+        if (fs.exists(infofile)) {
           results.add(curfile);
           
         } else {
@@ -178,12 +178,12 @@ public class HStoreFile implements HConstants, WritableComparable {
     for(int i = 0; i < infofiles.length; i++) {
       String name = infofiles[i].getName();
       
-      if(name.startsWith(HSTORE_INFOFILE_PREFIX)) {
+      if (name.startsWith(HSTORE_INFOFILE_PREFIX)) {
         long fileId = Long.parseLong(name.substring(HSTORE_INFOFILE_PREFIX.length()));
         HStoreFile curfile = new HStoreFile(conf, dir, regionName, colFamily, fileId);
         Path mapfile = curfile.getMapFilePath();
         
-        if(! fs.exists(mapfile)) {
+        if (!fs.exists(mapfile)) {
           fs.delete(curfile.getInfoFilePath());
         }
       }
@@ -220,7 +220,7 @@ public class HStoreFile implements HConstants, WritableComparable {
           while(in.next(readkey, readval)) {
             Text key = readkey.getRow();
             
-            if(key.compareTo(midKey) < 0) {
+            if (key.compareTo(midKey) < 0) {
               outA.append(readkey, readval);
               
             } else {
@@ -260,7 +260,7 @@ public class HStoreFile implements HConstants, WritableComparable {
                                             HStoreKey.class, BytesWritable.class);
     
     try {
-      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
+      for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
         HStoreFile src = it.next();
         MapFile.Reader in = new MapFile.Reader(fs, src.getMapFilePath().toString(), conf);
         
@@ -283,11 +283,11 @@ public class HStoreFile implements HConstants, WritableComparable {
     // Build a unified InfoFile from the source InfoFiles.
 
     long unifiedSeqId = -1;
-    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext(); ) {
+    for(Iterator<HStoreFile> it = srcFiles.iterator(); it.hasNext();) {
       HStoreFile hsf = it.next();
       long curSeqId = hsf.loadInfo(fs);
       
-      if(curSeqId > unifiedSeqId) {
+      if (curSeqId > unifiedSeqId) {
         unifiedSeqId = curSeqId;
       }
     }
@@ -301,7 +301,7 @@ public class HStoreFile implements HConstants, WritableComparable {
     
     try {
       byte flag = in.readByte();
-      if(flag == INFO_SEQ_NUM) {
+      if (flag == INFO_SEQ_NUM) {
         return in.readLong();
         
       } else {
@@ -352,17 +352,17 @@ public class HStoreFile implements HConstants, WritableComparable {
   public int compareTo(Object o) {
     HStoreFile other = (HStoreFile) o;
     int result = this.dir.compareTo(other.dir);    
-    if(result == 0) {
+    if (result == 0) {
      result = this.regionName.compareTo(other.regionName);
     }
-    if(result == 0) {
+    if (result == 0) {
       result = this.colFamily.compareTo(other.colFamily);
     }    
-    if(result == 0) {
-      if(this.fileId < other.fileId) {
+    if (result == 0) {
+      if (this.fileId < other.fileId) {
         result = -1;
         
-      } else if(this.fileId > other.fileId) {
+      } else if (this.fileId > other.fileId) {
         result = 1;
       }
     }

+ 9 - 9
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HStoreKey.java

@@ -29,7 +29,7 @@ public class HStoreKey implements WritableComparable {
   public static Text extractFamily(Text col) throws IOException {
     String column = col.toString();
     int colpos = column.indexOf(":");
-    if(colpos < 0) {
+    if (colpos < 0) {
       throw new IllegalArgumentException("Illegal column name has no family indicator: " + column);
     }
     return new Text(column.substring(0, colpos));
@@ -94,8 +94,8 @@ public class HStoreKey implements WritableComparable {
   }
   
   public boolean matchesRowCol(HStoreKey other) {
-    if(this.row.compareTo(other.row) == 0 &&
-       this.column.compareTo(other.column) == 0) {
+    if (this.row.compareTo(other.row) == 0 &&
+        this.column.compareTo(other.column) == 0) {
       return true;
       
     } else {
@@ -104,8 +104,8 @@ public class HStoreKey implements WritableComparable {
   }
   
   public boolean matchesWithoutColumn(HStoreKey other) {
-    if((this.row.compareTo(other.row) == 0) &&
-       (this.timestamp >= other.getTimestamp())) {
+    if ((this.row.compareTo(other.row) == 0) &&
+        (this.timestamp >= other.getTimestamp())) {
       return true;
       
     } else {
@@ -124,14 +124,14 @@ public class HStoreKey implements WritableComparable {
   public int compareTo(Object o) {
     HStoreKey other = (HStoreKey) o;
     int result = this.row.compareTo(other.row);
-    if(result == 0) {
+    if (result == 0) {
       result = this.column.compareTo(other.column);
       
-      if(result == 0) {
-        if(this.timestamp < other.timestamp) {
+      if (result == 0) {
+        if (this.timestamp < other.timestamp) {
           result = 1;
           
-        } else if(this.timestamp > other.timestamp) {
+        } else if (this.timestamp > other.timestamp) {
           result = -1;
         }
       }

+ 7 - 7
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HTableDescriptor.java

@@ -54,7 +54,7 @@ public class HTableDescriptor implements WritableComparable {
 
   /** Do we contain a given column? */
   public boolean hasFamily(Text family) {
-    if(families.contains(family)) {
+    if (families.contains(family)) {
       return true;
       
     } else {
@@ -75,7 +75,7 @@ public class HTableDescriptor implements WritableComparable {
     name.write(out);
     out.writeInt(maxVersions);
     out.writeInt(families.size());
-    for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+    for(Iterator<Text> it = families.iterator(); it.hasNext();) {
       it.next().write(out);
     }
   }
@@ -99,21 +99,21 @@ public class HTableDescriptor implements WritableComparable {
   public int compareTo(Object o) {
     HTableDescriptor htd = (HTableDescriptor) o;
     int result = name.compareTo(htd.name);
-    if(result == 0) {
+    if (result == 0) {
       result = maxVersions - htd.maxVersions;
     }
     
-    if(result == 0) {
+    if (result == 0) {
       result = families.size() - htd.families.size();
     }
     
-    if(result == 0) {
+    if (result == 0) {
       Iterator<Text> it2 = htd.families.iterator();
-      for(Iterator<Text> it = families.iterator(); it.hasNext(); ) {
+      for(Iterator<Text> it = families.iterator(); it.hasNext();) {
         Text family1 = it.next();
         Text family2 = it2.next();
         result = family1.compareTo(family2);
-        if(result != 0) {
+        if (result != 0) {
           return result;
         }
       }

+ 6 - 6
src/contrib/hbase/src/java/org/apache/hadoop/hbase/Leases.java

@@ -77,7 +77,7 @@ public class Leases {
       synchronized(sortedLeases) {
         Lease lease = new Lease(holderId, resourceId, listener);
         Text leaseId = lease.getLeaseId();
-        if(leases.get(leaseId) != null) {
+        if (leases.get(leaseId) != null) {
           throw new IOException("Impossible state for createLease(): Lease for holderId " + holderId + " and resourceId " + resourceId + " is still held.");
         }
         leases.put(leaseId, lease);
@@ -92,7 +92,7 @@ public class Leases {
       synchronized(sortedLeases) {
         Text leaseId = createLeaseId(holderId, resourceId);
         Lease lease = leases.get(leaseId);
-        if(lease == null) {
+        if (lease == null) {
           
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
@@ -113,7 +113,7 @@ public class Leases {
       synchronized(sortedLeases) {
         Text leaseId = createLeaseId(holderId, resourceId);
         Lease lease = leases.get(leaseId);
-        if(lease == null) {
+        if (lease == null) {
           
           // It's possible that someone tries to renew the lease, but 
           // it just expired a moment ago.  So fail.
@@ -139,7 +139,7 @@ public class Leases {
             while((sortedLeases.size() > 0)
                   && ((top = sortedLeases.first()) != null)) {
               
-              if(top.shouldExpire()) {
+              if (top.shouldExpire()) {
                 leases.remove(top.getLeaseId());
                 sortedLeases.remove(top);
 
@@ -205,10 +205,10 @@ public class Leases {
 
     public int compareTo(Object o) {
       Lease other = (Lease) o;
-      if(this.lastUpdate < other.lastUpdate) {
+      if (this.lastUpdate < other.lastUpdate) {
         return -1;
         
-      } else if(this.lastUpdate > other.lastUpdate) {
+      } else if (this.lastUpdate > other.lastUpdate) {
         return 1;
         
       } else {

+ 10 - 10
src/contrib/hbase/src/test/org/apache/hadoop/hbase/Environment.java

@@ -29,27 +29,27 @@ public class Environment {
     String value = null;
     
     value = System.getenv("DEBUGGING");
-    if(value != null && value.equalsIgnoreCase("TRUE")) {
+    if (value != null && value.equalsIgnoreCase("TRUE")) {
       debugging = true;
     }
     
     value = System.getenv("LOGGING_LEVEL");
-    if(value != null && value.length() != 0) {
-      if(value.equalsIgnoreCase("ALL")) {
+    if (value != null && value.length() != 0) {
+      if (value.equalsIgnoreCase("ALL")) {
         logLevel = Level.ALL;
-      } else if(value.equalsIgnoreCase("DEBUG")) {
+      } else if (value.equalsIgnoreCase("DEBUG")) {
         logLevel = Level.DEBUG;
-      } else if(value.equalsIgnoreCase("ERROR")) {
+      } else if (value.equalsIgnoreCase("ERROR")) {
         logLevel = Level.ERROR;
-      } else if(value.equalsIgnoreCase("FATAL")) {
+      } else if (value.equalsIgnoreCase("FATAL")) {
         logLevel = Level.FATAL;
-      } else if(value.equalsIgnoreCase("INFO")) {
+      } else if (value.equalsIgnoreCase("INFO")) {
         logLevel = Level.INFO;
-      } else if(value.equalsIgnoreCase("OFF")) {
+      } else if (value.equalsIgnoreCase("OFF")) {
         logLevel = Level.OFF;
-      } else if(value.equalsIgnoreCase("TRACE")) {
+      } else if (value.equalsIgnoreCase("TRACE")) {
         logLevel = Level.TRACE;
-      } else if(value.equalsIgnoreCase("WARN")) {
+      } else if (value.equalsIgnoreCase("WARN")) {
         logLevel = Level.WARN;
       }
     }

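Aside: the LOGGING_LEVEL chain above maps an environment string onto a log4j Level by hand. A hedged alternative sketch, assuming log4j 1.x is on the classpath: Level.toLevel(String, Level) performs the same case-insensitive mapping and falls back to a default for unrecognized names.

  import org.apache.log4j.Level;

  public class LevelDemo {
    // Collapses the if/else-if ladder above into one call with a fallback.
    static Level parseLevel(String value, Level fallback) {
      if (value == null || value.length() == 0) {
        return fallback;
      }
      return Level.toLevel(value, fallback);
    }

    public static void main(String[] args) {
      System.out.println(parseLevel(System.getenv("LOGGING_LEVEL"), Level.WARN));
    }
  }
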
+ 27 - 27
src/contrib/hbase/src/test/org/apache/hadoop/hbase/TestHRegion.java

@@ -90,7 +90,7 @@ public class TestHRegion extends TestCase {
   
   public void testSetup() throws IOException {
     try {
-      if(System.getProperty("test.build.data") == null) {
+      if (System.getProperty("test.build.data") == null) {
         String dir = new File(new File("").getAbsolutePath(), "build/contrib/hbase/test").getAbsolutePath();
         System.out.println(dir);
         System.setProperty("test.build.data", dir);
@@ -98,7 +98,7 @@ public class TestHRegion extends TestCase {
       conf = new Configuration();
       
       Environment.getenv();
-      if(Environment.debugging) {
+      if (Environment.debugging) {
         Logger rootLogger = Logger.getRootLogger();
         rootLogger.setLevel(Level.WARN);
         
@@ -133,7 +133,7 @@ public class TestHRegion extends TestCase {
   // Test basic functionality. Writes to contents:basic and anchor:anchornum-*
 
   public void testBasic() throws IOException {
-    if(!initialized) {
+    if (!initialized) {
       throw new IllegalStateException();
     }
 
@@ -191,7 +191,7 @@ public class TestHRegion extends TestCase {
   // Test scanners. Writes contents:firstcol and anchor:secondcol
   
   public void testScan() throws IOException {
-    if(!initialized) {
+    if (!initialized) {
       throw new IllegalStateException();
     }
 
@@ -225,13 +225,13 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if(col.compareTo(cols[j]) == 0) {
+            if (col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                            + ", Value for " + col + " should be: " + k
                            + ", but was fetched as: " + curval, k, curval);
@@ -258,13 +258,13 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if(col.compareTo(cols[j]) == 0) {
+            if (col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                            + ", Value for " + col + " should be: " + k
                            + ", but was fetched as: " + curval, k, curval);
@@ -299,13 +299,13 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
 
           for(int j = 0; j < cols.length; j++) {
-            if(col.compareTo(cols[j]) == 0) {
+            if (col.compareTo(cols[j]) == 0) {
               assertEquals("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                            + ", Value for " + col + " should be: " + k
                            + ", but was fetched as: " + curval, k, curval);
@@ -332,7 +332,7 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -362,7 +362,7 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 500;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -390,10 +390,10 @@ public class TestHRegion extends TestCase {
   // Creates contents:body
   
   public void testBatchWrite() throws IOException {
-    if(!initialized || failures) {
+    if (!initialized || failures) {
       throw new IllegalStateException();
     }
-    if(! Environment.debugging) {
+    if (!Environment.debugging) {
       return;
     }
 
@@ -437,7 +437,7 @@ public class TestHRegion extends TestCase {
         }
       }
       long startCompact = System.currentTimeMillis();
-      if(region.compactStores()) {
+      if (region.compactStores()) {
         totalCompact = System.currentTimeMillis() - startCompact;
         System.out.println("Region compacted - elapsedTime: " + (totalCompact / 1000.0));
         
@@ -467,14 +467,14 @@ public class TestHRegion extends TestCase {
   // NOTE: This test depends on testBatchWrite succeeding
   
   public void testSplitAndMerge() throws IOException {
-    if(!initialized || failures) {
+    if (!initialized || failures) {
       throw new IllegalStateException();
     }
     
     try {
       Text midKey = new Text();
       
-      if(region.needsSplit(midKey)) {
+      if (region.needsSplit(midKey)) {
         System.out.println("Needs split");
       }
       
@@ -504,7 +504,7 @@ public class TestHRegion extends TestCase {
   // This test verifies that everything is still there after splitting and merging
   
   public void testRead() throws IOException {
-    if(!initialized || failures) {
+    if (!initialized || failures) {
       throw new IllegalStateException();
     }
 
@@ -525,19 +525,19 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           String curval = new String(val).trim();
 
-          if(col.compareTo(CONTENTS_BASIC) == 0) {
+          if (col.compareTo(CONTENTS_BASIC) == 0) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                        + ", Value for " + col + " should start with: " + CONTENTSTR
                        + ", but was fetched as: " + curval,
                        curval.startsWith(CONTENTSTR));
             contentsFetched++;
             
-          } else if(col.toString().startsWith(ANCHORNUM)) {
+          } else if (col.toString().startsWith(ANCHORNUM)) {
             assertTrue("Error at:" + curKey.getRow() + "/" + curKey.getTimestamp()
                        + ", Value for " + col + " should start with: " + ANCHORSTR
                        + ", but was fetched as: " + curval,
@@ -572,7 +572,7 @@ public class TestHRegion extends TestCase {
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       int k = 0;
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           Text col = it.next();
           byte val[] = curVals.get(col);
           int curval = Integer.parseInt(new String(val).trim());
@@ -596,7 +596,7 @@ public class TestHRegion extends TestCase {
     
     // Verify testBatchWrite data
 
-    if(Environment.debugging) {
+    if (Environment.debugging) {
       s = region.getScanner(new Text[] { CONTENTS_BODY }, new Text());
       try {
         int numFetched = 0;
@@ -604,7 +604,7 @@ public class TestHRegion extends TestCase {
         TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
         int k = 0;
         while(s.next(curKey, curVals)) {
-          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+          for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
             Text col = it.next();
             byte val[] = curVals.get(col);
 
@@ -635,7 +635,7 @@ public class TestHRegion extends TestCase {
       HStoreKey curKey = new HStoreKey();
       TreeMap<Text, byte[]> curVals = new TreeMap<Text, byte[]>();
       while(s.next(curKey, curVals)) {
-        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext(); ) {
+        for(Iterator<Text> it = curVals.keySet().iterator(); it.hasNext();) {
           it.next();
           fetched++;
         }
@@ -650,7 +650,7 @@ public class TestHRegion extends TestCase {
 
   
   private static void deleteFile(File f) {
-    if(f.isDirectory()) {
+    if (f.isDirectory()) {
       File[] children = f.listFiles();
       for(int i = 0; i < children.length; i++) {
         deleteFile(children[i]);
@@ -660,7 +660,7 @@ public class TestHRegion extends TestCase {
   }
   
   public void testCleanup() throws IOException {
-    if(!initialized) {
+    if (!initialized) {
       throw new IllegalStateException();
     }
 

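Aside: many TestHRegion hunks only tighten "it.hasNext(); )" to "it.hasNext();)". For readers less used to the idiom, a self-contained comparison of that explicit-Iterator loop with the equivalent Java 5 for-each form (String keys stand in for hadoop's Text so the demo has no dependencies):

  import java.util.Iterator;
  import java.util.TreeMap;

  public class IterDemo {
    public static void main(String[] args) {
      TreeMap<String, byte[]> curVals = new TreeMap<String, byte[]>();
      curVals.put("contents:basic", "7".getBytes());

      // Loop shape used throughout the test above:
      for (Iterator<String> it = curVals.keySet().iterator(); it.hasNext();) {
        String col = it.next();
        System.out.println(col + " = " + new String(curVals.get(col)).trim());
      }

      // Equivalent for-each form:
      for (String col : curVals.keySet()) {
        System.out.println(col + " = " + new String(curVals.get(col)).trim());
      }
    }
  }
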
+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/CompoundDirSpec.java

@@ -219,7 +219,7 @@ class CompoundDirSpec {
   public static String expandGlobInputSpec(String inputSpec, JobConf job)
   {
     inputSpec = inputSpec.trim();
-    if(!inputSpec.startsWith(MERGEGLOB_PREFIX)) {
+    if (!inputSpec.startsWith(MERGEGLOB_PREFIX)) {
       return inputSpec;
     }
     inputSpec = inputSpec.substring(MERGEGLOB_PREFIX.length());

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java

@@ -115,7 +115,7 @@ public class JarBuilder {
     JarEntry entry = null;
     while (entries.hasMoreElements()) {
       entry = (JarEntry) entries.nextElement();
-      //if(entry.getName().startsWith("META-INF/")) continue; 
+      //if (entry.getName().startsWith("META-INF/")) continue; 
       InputStream in = src.getInputStream(entry);
       addNamedStream(dst, entry.getName(), in);
     }

+ 3 - 3
src/contrib/streaming/src/java/org/apache/hadoop/streaming/MergerInputFormat.java

@@ -82,7 +82,7 @@ public class MergerInputFormat extends InputFormatBase {
   /** Delegate to the primary InputFormat. 
       Force full-file splits since there's no index to sync secondaries.
       (and if there was, this index may need to be created for the first time
-      full file at a time...    )
+      full file at a time...   )
   */
   public InputSplit[] getSplits(JobConf job, int numSplits) throws IOException {
     return ((StreamInputFormat) primary_).getSplits(job, numSplits);
@@ -121,7 +121,7 @@ public class MergerInputFormat extends InputFormatBase {
   /*
     private FileSplit relatedSplit(FileSplit primarySplit, int i, CompoundDirSpec spec) throws IOException
     {
-    if(i == 0) {
+    if (i == 0) {
     return primarySplit;
     }
 
@@ -330,7 +330,7 @@ class MergeRecordStream {
   Writable v_;
 
   public MergeRecordStream(int index, RecordReader reader, WritableComparable k, Writable v)
-      throws IOException {
+    throws IOException {
     index_ = index;
     reader_ = reader;
     k_ = k;

+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java

@@ -261,7 +261,7 @@ public abstract class PipeMapRed {
           finalOutputURI = new URI(sideEffectPathFinal_.toString()); // implicit dfs: 
         }
         // apply default scheme
-        if(finalOutputURI.getScheme() == null) {
+        if (finalOutputURI.getScheme() == null) {
           finalOutputURI = new URI("file", finalOutputURI.getSchemeSpecificPart(), null);
         }
         boolean allowSocket = useSingleSideOutputURI_;
@@ -579,7 +579,7 @@ public abstract class PipeMapRed {
           logprintln("closing " + finalOutputURI);
           if (sideEffectOut_ != null) sideEffectOut_.close();
           logprintln("closed  " + finalOutputURI);
-          if ( ! useSingleSideOutputURI_) {
+          if (!useSingleSideOutputURI_) {
             ((PhasedFileSystem)sideFs_).commit(); 
           }
         }

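Aside: the first PipeMapRed hunk applies a default scheme to the side-effect output URI. A standalone sketch of that fallback (the path literal is made up):

  import java.net.URI;
  import java.net.URISyntaxException;

  public class SchemeDemo {
    public static void main(String[] args) throws URISyntaxException {
      URI finalOutputURI = new URI("/tmp/side-effect.out");   // no scheme yet
      if (finalOutputURI.getScheme() == null) {
        // Re-wrap as a file: URI, as in the hunk above.
        finalOutputURI = new URI("file", finalOutputURI.getSchemeSpecificPart(), null);
      }
      System.out.println(finalOutputURI);                     // file:/tmp/side-effect.out
    }
  }
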
+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java

@@ -70,7 +70,7 @@ public class PipeMapper extends PipeMapRed implements Mapper {
     if (outThread_ == null) {
       startOutputThreads(output, reporter);
     }
-    if( outerrThreadsThrowable != null ) {
+    if (outerrThreadsThrowable != null) {
       mapRedFinished();
       throw new IOException ("MROutput/MRErrThread failed:"
                              + StringUtils.stringifyException(

+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java

@@ -74,10 +74,10 @@ public class PipeReducer extends PipeMapRed implements Reducer {
         numRecRead_++;
         maybeLogRecord();
         if (doPipe_) {
-          if( outerrThreadsThrowable != null ) {
+          if (outerrThreadsThrowable != null) {
             mapRedFinished();
             throw new IOException ("MROutput/MRErrThread failed:"
-                                   + StringUtils.stringifyException( 
+                                   + StringUtils.stringifyException(
                                                                     outerrThreadsThrowable));
           }
           write(key);

+ 39 - 39
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java

@@ -82,7 +82,7 @@ public class StreamJob {
     new DefaultOptionBuilder("-","-", false);
   private ArgumentBuilder argBuilder = new ArgumentBuilder(); 
   private Parser parser = new Parser(); 
-  private Group allOptions ; 
+  private Group allOptions; 
   HelpFormatter helpFormatter = new HelpFormatter("  ", "  ", "  ", 900);
   // need these two at class level to extract values later from 
   // commons-cli command line
@@ -197,7 +197,7 @@ public class StreamJob {
   }
 
   void parseArgv(){
-    CommandLine cmdLine = null ; 
+    CommandLine cmdLine = null; 
     try{
       cmdLine = parser.parse(argv_);
     }catch(Exception oe){
@@ -209,10 +209,10 @@ public class StreamJob {
       }
     }
     
-    if( cmdLine != null ){
-      verbose_ =  cmdLine.hasOption("-verbose") ;
-      detailedUsage_ = cmdLine.hasOption("-info") ;
-      debug_ = cmdLine.hasOption("-debug")? debug_ + 1 : debug_ ;
+    if (cmdLine != null){
+      verbose_ = cmdLine.hasOption("-verbose");
+      detailedUsage_ = cmdLine.hasOption("-info");
+      debug_ = cmdLine.hasOption("-debug")? debug_ + 1 : debug_;
       inputTagged_ = cmdLine.hasOption("-inputtagged"); 
       
       inputSpecs_.addAll(cmdLine.getValues("-input"));
@@ -230,12 +230,12 @@ public class StreamJob {
       configPath_.addAll(cmdLine.getValues("-config"));
       
       String fsName = (String)cmdLine.getValue("-dfs");
-      if( null != fsName ){
+      if (null != fsName){
         userJobConfProps_.put("fs.default.name", fsName);        
       }
       
       String jt = (String)cmdLine.getValue("mapred.job.tracker");
-      if( null != jt ){
+      if (null != jt){
         userJobConfProps_.put("fs.default.name", jt);        
       }
       
@@ -246,15 +246,15 @@ public class StreamJob {
       inReaderSpec_ = (String)cmdLine.getValue("-inputreader"); 
       
       List<String> car = cmdLine.getValues("-cacheArchive"); 
-      if( null != car ){
-        for( String s : car ){
+      if (null != car){
+        for(String s : car){
           cacheArchives = (cacheArchives == null)?s :cacheArchives + "," + s;  
         }
       }
 
       List<String> caf = cmdLine.getValues("-cacheFile"); 
-      if( null != caf ){
-        for( String s : caf ){
+      if (null != caf){
+        for(String s : caf){
           cacheFiles = (cacheFiles == null)?s :cacheFiles + "," + s;  
         }
       }
@@ -262,14 +262,14 @@ public class StreamJob {
       List<String> jobConfArgs = (List<String>)cmdLine.getValue(jobconf); 
       List<String> envArgs = (List<String>)cmdLine.getValue(cmdenv); 
       
-      if( null != jobConfArgs ){
-        for( String s : jobConfArgs){
+      if (null != jobConfArgs){
+        for(String s : jobConfArgs){
           String []parts = s.split("="); 
           userJobConfProps_.put(parts[0], parts[1]);
         }
       }
-      if( null != envArgs ){
-        for( String s : envArgs ){
+      if (null != envArgs){
+        for(String s : envArgs){
           if (addTaskEnvironment_.length() > 0) {
             addTaskEnvironment_ += " ";
           }
@@ -310,7 +310,7 @@ public class StreamJob {
       withMinimum(1).
       withMaximum(max).
       withValidator(validator).
-      create() ;
+      create();
    
     return builder.
       withLongName(name).
@@ -332,15 +332,15 @@ public class StreamJob {
           // an can exec check in java 6
           for (String file : (List<String>)values) {
             File f = new File(file);  
-            if ( ! f.exists() ) {
+            if (!f.exists()) {
               throw new InvalidArgumentException("Argument : " + 
                                                  f.getAbsolutePath() + " doesn't exist."); 
             }
-            if ( ! f.isFile() ) {
+            if (!f.isFile()) {
               throw new InvalidArgumentException("Argument : " + 
                                                  f.getAbsolutePath() + " is not a file."); 
             }
-            if ( ! f.canRead() ) {
+            if (!f.canRead()) {
               throw new InvalidArgumentException("Argument : " + 
                                                  f.getAbsolutePath() + " is not accessible"); 
             }
@@ -378,7 +378,7 @@ public class StreamJob {
     Option mapper  = createOption("mapper", 
                                   "The streaming command to run", "cmd", 1, false);
     Option combiner = createOption("combiner", 
-                                   "The streaming command to run", "cmd",1, false);
+                                   "The streaming command to run", "cmd", 1, false);
     // reducer could be NONE 
     Option reducer = createOption("reducer", 
                                   "The streaming command to run", "cmd", 1, false); 
@@ -388,21 +388,21 @@ public class StreamJob {
     Option dfs = createOption("dfs", 
                               "Optional. Override DFS configuration", "<h:p>|local", 1, false); 
     Option jt = createOption("jt", 
-                             "Optional. Override JobTracker configuration", "<h:p>|local",1, false);
+                             "Optional. Override JobTracker configuration", "<h:p>|local", 1, false);
     Option additionalconfspec = createOption("additionalconfspec", 
-                                             "Optional.", "spec",1, false );
+                                             "Optional.", "spec", 1, false);
     Option inputformat = createOption("inputformat", 
-                                      "Optional.", "spec",1, false );
+                                      "Optional.", "spec", 1, false);
     Option outputformat = createOption("outputformat", 
-                                       "Optional.", "spec",1, false );
+                                       "Optional.", "spec", 1, false);
     Option partitioner = createOption("partitioner", 
-                                      "Optional.", "spec",1, false );
+                                      "Optional.", "spec", 1, false);
     Option inputreader = createOption("inputreader", 
-                                      "Optional.", "spec",1, false );
+                                      "Optional.", "spec", 1, false);
     Option cacheFile = createOption("cacheFile", 
                                     "File name URI", "fileNameURI", 1, false);
     Option cacheArchive = createOption("cacheArchive", 
-                                       "File name URI", "fileNameURI",1, false);
+                                       "File name URI", "fileNameURI", 1, false);
     
     // boolean properties
     
@@ -844,7 +844,7 @@ public class StreamJob {
     if (cacheFiles != null)
       DistributedCache.setCacheFiles(fileURIs, jobConf_);
     
-    if(verbose_) {
+    if (verbose_) {
       listJobConfProperties();
     }
    
@@ -956,7 +956,7 @@ public class StreamJob {
       LOG.info("To kill this job, run:");
       LOG.info(getHadoopClientHome() + "/bin/hadoop job  -Dmapred.job.tracker=" + hp + " -kill "
                + jobId_);
-      //LOG.info("Job file: " + running_.getJobFile() );
+      //LOG.info("Job file: " + running_.getJobFile());
       LOG.info("Tracking URL: " + StreamUtil.qualifyHost(running_.getTrackingURL()));
     }
   }
@@ -1012,7 +1012,7 @@ public class StreamJob {
     }catch(FileAlreadyExistsException fae){
       LOG.error("Error launching job , Output path already exists : " 
                 + fae.getMessage());
-    }catch( IOException ioe){
+    }catch(IOException ioe){
       LOG.error("Error Launching job : " + ioe.getMessage());
     }
     finally {
@@ -1025,7 +1025,7 @@ public class StreamJob {
   }
   /** Support -jobconf x=y x1=y1 type options **/
   class MultiPropertyOption extends PropertyOption{
-    private String optionString ; 
+    private String optionString; 
     MultiPropertyOption(){
       super(); 
     }
@@ -1033,7 +1033,7 @@ public class StreamJob {
     MultiPropertyOption(final String optionString,
                         final String description,
                         final int id){
-      super(optionString, description, id) ; 
+      super(optionString, description, id); 
       this.optionString = optionString;
     }
 
@@ -1053,10 +1053,10 @@ public class StreamJob {
       }
       
       ArrayList properties = new ArrayList(); 
-      String next = "" ; 
-      while( arguments.hasNext()){
+      String next = ""; 
+      while(arguments.hasNext()){
         next = (String) arguments.next();
-        if( ! next.startsWith("-") ){
+        if (!next.startsWith("-")){
           properties.add(next);
         }else{
           arguments.previous();
@@ -1064,9 +1064,9 @@ public class StreamJob {
         }
       } 
 
-      // add to any existing values ( support specifying args multiple times)
-      List<String> oldVal = (List<String>)commandLine.getValue(this) ; 
-      if( oldVal == null ){
+      // add to any existing values (support specifying args multiple times)
+      List<String> oldVal = (List<String>)commandLine.getValue(this); 
+      if (oldVal == null){
         commandLine.addValue(this, properties);
       }else{
         oldVal.addAll(properties); 

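Two asides on the StreamJob hunks. First, the untouched context line that stores the -jt value under "fs.default.name" rather than "mapred.job.tracker" looks like a pre-existing bug that this formatting-only pass leaves in place. Second, the MultiPropertyOption machinery collects -jobconf x=y pairs and splits them on "="; a standalone sketch of that splitting, using indexOf so values that themselves contain '=' survive (the real code's split("=") would truncate them; the sample keys are made up):

  import java.util.Properties;

  public class JobconfDemo {
    public static void main(String[] args) {
      Properties userJobConfProps = new Properties();
      for (String s : new String[] { "mapred.reduce.tasks=2", "stream.opts=a=b" }) {
        int eq = s.indexOf('=');                     // split on the first '=' only
        userJobConfProps.put(s.substring(0, eq), s.substring(eq + 1));
      }
      System.out.println(userJobConfProps);          // e.g. {stream.opts=a=b, mapred.reduce.tasks=2}
    }
  }
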
+ 2 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamUtil.java

@@ -494,10 +494,10 @@ public class StreamUtil {
   public static String getBoundAntProperty(String name, String defaultVal)
   {
     String val = System.getProperty(name);
-    if(val != null && val.indexOf("${") >= 0) {
+    if (val != null && val.indexOf("${") >= 0) {
       val = null;
     }
-    if(val == null) {
+    if (val == null) {
       val = defaultVal;
     }
     return val;

+ 1 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java

@@ -101,7 +101,7 @@ public class StreamXmlRecordReader extends StreamBaseRecordReader {
     ((Text) key).set(record);
     ((Text) value).set("");
 
-    /*if(numNext < 5) {
+    /*if (numNext < 5) {
       System.out.println("@@@ " + numNext + ". true next k=|" + key.toString().replaceAll("[\\r\\n]", " ")
       + "|, len=" + buf.length() + " v=|" + value.toString().replaceAll("[\\r\\n]", " ") + "|");
       }*/

+ 6 - 6
src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java

@@ -39,7 +39,7 @@ public class UTF8ByteArrayUtils {
    */
   public static int findTab(byte [] utf, int start, int length) {
     for(int i=start; i<(start+length); i++) {
-      if(utf[i]==(byte)'\t') {
+      if (utf[i]==(byte)'\t') {
         return i;
       }
     }
@@ -68,9 +68,9 @@ public class UTF8ByteArrayUtils {
    */
   public static void splitKeyVal(byte[] utf, int start, int length, 
                                  Text key, Text val, int splitPos) throws IOException {
-    if(splitPos<start || splitPos >= (start+length))
-      throw new IllegalArgumentException( "splitPos must be in the range " +
-                                          "[" + start + ", " + (start+length) + "]: " + splitPos);
+    if (splitPos<start || splitPos >= (start+length))
+      throw new IllegalArgumentException("splitPos must be in the range " +
+                                         "[" + start + ", " + (start+length) + "]: " + splitPos);
     int keyLen = (splitPos-start);
     byte [] keyBytes = new byte[keyLen];
     System.arraycopy(utf, start, keyBytes, 0, keyLen);
@@ -122,7 +122,7 @@ public class UTF8ByteArrayUtils {
       if (c == '\r') {
         in.mark(1);
         int c2 = in.read();
-        if(c2 == -1) {
+        if (c2 == -1) {
           isEOF = true;
           break;
         }
@@ -142,7 +142,7 @@ public class UTF8ByteArrayUtils {
       buf[offset++] = (byte) c;
     }
 
-    if(isEOF && offset==0) {
+    if (isEOF && offset==0) {
       return null;
     } else {
       lineBuffer = new byte[offset];

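Aside: the findTab()/splitKeyVal() hunks reformat a byte-level scan for the key/value tab separator. A minimal dependency-free sketch of the same scan and split:

  public class TabSplitDemo {
    // Same linear scan as findTab() above.
    static int findTab(byte[] utf, int start, int length) {
      for (int i = start; i < (start + length); i++) {
        if (utf[i] == (byte) '\t') {
          return i;
        }
      }
      return -1;
    }

    public static void main(String[] args) {
      byte[] line = "key1\tvalue1".getBytes();
      int pos = findTab(line, 0, line.length);
      System.out.println(new String(line, 0, pos));                          // key1
      System.out.println(new String(line, pos + 1, line.length - pos - 1));  // value1
    }
  }
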
+ 4 - 4
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java

@@ -217,7 +217,7 @@ public class TestStreamedMerge extends TestCase {
     String overrideFS = StreamUtil.getBoundAntProperty("fs.default.name", null);
     MiniDFSCluster cluster = null;
     try {
-      if(overrideFS == null) {
+      if (overrideFS == null) {
         cluster = new MiniDFSCluster(conf_, 1, true, null);
         fs_ = cluster.getFileSystem();
       } else {
@@ -265,7 +265,7 @@ public class TestStreamedMerge extends TestCase {
     } else {
       String userOut = StreamUtil.getBoundAntProperty(
                                                       "hadoop.test.localoutputfile", null);
-      if(userOut != null) {
+      if (userOut != null) {
         f = new File(userOut);
         // don't delete so they can mkfifo
         maybeFifoOutput_ = true;
@@ -275,7 +275,7 @@ public class TestStreamedMerge extends TestCase {
         maybeFifoOutput_ = false;
       }
       String s = new Path(f.getAbsolutePath()).toString();
-      if(! s.startsWith("/")) {
+      if (!s.startsWith("/")) {
         s = "/" + s; // Windows "file:/C:/"
       }
       sideOutput = "file:" + s;
@@ -292,7 +292,7 @@ public class TestStreamedMerge extends TestCase {
       }
       output = outputBuf.toString();
     } else {
-      if(maybeFifoOutput_) {
+      if (maybeFifoOutput_) {
         System.out.println("assertEquals will fail.");
         output = "potential FIFO: not retrieving to avoid blocking on open() "
           + f.getAbsoluteFile();

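Aside: the startsWith("/") hunk handles Windows absolute paths such as C:/..., which lack the leading slash a file: URI needs. A standalone sketch (the backslash normalization stands in for hadoop's Path):

  import java.io.File;

  public class FileUriDemo {
    public static void main(String[] args) {
      String s = new File("out.txt").getAbsolutePath().replace('\\', '/');
      if (!s.startsWith("/")) {
        s = "/" + s;                 // Windows "file:/C:/..."
      }
      System.out.println("file:" + s);
    }
  }
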
+ 3 - 3
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TrApp.java

@@ -68,7 +68,7 @@ public class TrApp
   void expect(String evName, String evVal) throws IOException
   {
     String got = env.getProperty(evName);
-    if(! evVal.equals(got)) {
+    if (!evVal.equals(got)) {
       String msg = "FAIL evName=" + evName + " got=" + got + " expect=" + evVal;
       throw new IOException(msg);
     }
@@ -77,7 +77,7 @@ public class TrApp
   void expectDefined(String evName) throws IOException
   {
     String got = env.getProperty(evName);
-    if(got == null) {
+    if (got == null) {
       String msg = "FAIL evName=" + evName + " is undefined. Expect defined.";
       throw new IOException(msg);
     }
@@ -105,7 +105,7 @@ public class TrApp
 
   public static String CUnescape(String s)
   {
-    if(s.equals("\\n")) {
+    if (s.equals("\\n")) {
       return "\n";
     } else {
       return s;

+ 1 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/UniqApp.java

@@ -39,7 +39,7 @@ public class UniqApp
     String line;
     String prevLine = null;
     while ((line = in.readLine()) != null) {
-      if(! line.equals(prevLine)) {
+      if (!line.equals(prevLine)) {
         System.out.println(header + line);
       }
       prevLine = line;

+ 2 - 2
src/contrib/streaming/src/test/org/apache/hadoop/streaming/UtilTest.java

@@ -34,7 +34,7 @@ class UtilTest {
 
   void checkUserDir() {
     // trunk/src/contrib/streaming --> trunk/build/contrib/streaming/test/data
-    if(! userDir_.equals(antTestDir_)) {
+    if (!userDir_.equals(antTestDir_)) {
       // because changes to user.dir are ignored by File static methods.
       throw new IllegalStateException("user.dir != test.build.data. The junit Ant task must be forked.");
     }
@@ -43,7 +43,7 @@ class UtilTest {
   void redirectIfAntJunit() throws IOException
   {
     boolean fromAntJunit = System.getProperty("test.build.data") != null;
-    if(fromAntJunit) {
+    if (fromAntJunit) {
       new File(antTestDir_).mkdirs();
       File outFile = new File(antTestDir_, testName_+".log");
       PrintStream out = new PrintStream(new FileOutputStream(outFile));

+ 13 - 13
src/java/org/apache/hadoop/conf/Configuration.java

@@ -107,7 +107,7 @@ public class Configuration {
     this.finalResources = (ArrayList)other.finalResources.clone();
     if (other.properties != null)
       this.properties = (Properties)other.properties.clone();
-    if(other.overlay!=null)
+    if (other.overlay!=null)
       this.overlay = (Properties)other.overlay.clone();
   }
 
@@ -142,7 +142,7 @@ public class Configuration {
   }
 
   private synchronized void addResource(ArrayList<Object> resources,
-      Object resource) {
+                                        Object resource) {
     
     resources.add(resource);                      // add to resources
     properties = null;                            // trigger reload
@@ -172,23 +172,23 @@ public class Configuration {
   private static int MAX_SUBST = 20;
 
   private String substituteVars(String expr) {
-    if(expr == null) {
+    if (expr == null) {
       return null;
     }
     Matcher match = varPat.matcher("");
     String eval = expr;
     for(int s=0; s<MAX_SUBST; s++) {
       match.reset(eval);
-      if(! match.find()) {
+      if (!match.find()) {
         return eval;
       }
       String var = match.group();
       var = var.substring(2, var.length()-1); // remove ${ .. }
       String val = System.getProperty(var);
-      if(val == null) {
+      if (val == null) {
         val = (String)this.getObject(var);
       }
-      if(val == null) {
+      if (val == null) {
         return eval; // return literal ${var}: var is unbound
       }
       // substitute
@@ -211,7 +211,7 @@ public class Configuration {
   }
   
   private synchronized Properties getOverlay() {
-    if(overlay==null){
+    if (overlay==null){
       overlay=new Properties();
     }
     return overlay;
@@ -221,7 +221,7 @@ public class Configuration {
    * exists, then <code>defaultValue</code> is returned.
    */
   public String get(String name, String defaultValue) {
-     return substituteVars(getProps().getProperty(name, defaultValue));
+    return substituteVars(getProps().getProperty(name, defaultValue));
   }
     
   /** Returns the value of the <code>name</code> property as an integer.  If no
@@ -338,7 +338,7 @@ public class Configuration {
    * interface. 
    */
   public Class<?> getClass(String propertyName, Class<?> defaultValue,
-      Class<?> xface) {
+                           Class<?> xface) {
     
     try {
       Class<?> theClass = getClass(propertyName, defaultValue);
@@ -354,7 +354,7 @@ public class Configuration {
    * First checks that the class implements the named interface. 
    */
   public void setClass(String propertyName, Class<?> theClass,
-      Class<?> xface) {
+                       Class<?> xface) {
     
     if (!xface.isAssignableFrom(theClass))
       throw new RuntimeException(theClass+" not "+xface.getName());
@@ -380,7 +380,7 @@ public class Configuration {
       }
     }
     LOG.warn("Could not make " + path + 
-                " in local directories from " + dirsProp);
+             " in local directories from " + dirsProp);
     for(int i=0; i < dirs.length; i++) {
       int index = (hashCode+i & Integer.MAX_VALUE) % dirs.length;
       LOG.warn(dirsProp + "[" + index + "]=" + dirs[index]);
@@ -460,7 +460,7 @@ public class Configuration {
       loadResources(newProps, defaultResources, false, quietmode);
       loadResources(newProps, finalResources, true, true);
       properties = newProps;
-      if(overlay!=null)
+      if (overlay!=null)
         properties.putAll(overlay);
     }
     return properties;
@@ -575,7 +575,7 @@ public class Configuration {
         String name = (String)e.nextElement();
         Object object = properties.get(name);
         String value = null;
-        if(object instanceof String) {
+        if (object instanceof String) {
           value = (String) object;
         }else {
           continue;

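Aside: the substituteVars() hunk above is the heart of Configuration's ${var} expansion: a bounded number of passes, system properties consulted before the config itself, and unresolved variables left literal. A self-contained sketch of that loop; the regex and the behavior when substitution does not converge are assumptions of this sketch, not verified against the rest of the class:

  import java.util.Properties;
  import java.util.regex.Matcher;
  import java.util.regex.Pattern;

  public class SubstDemo {
    static final Pattern VAR = Pattern.compile("\\$\\{[^\\}\\$ ]+\\}");  // assumed pattern
    static final int MAX_SUBST = 20;                 // same bound as above

    static String substituteVars(String expr, Properties props) {
      if (expr == null) {
        return null;
      }
      String eval = expr;
      for (int s = 0; s < MAX_SUBST; s++) {
        Matcher match = VAR.matcher(eval);
        if (!match.find()) {
          return eval;
        }
        String var = match.group();
        var = var.substring(2, var.length() - 1);    // remove ${ .. }
        String val = System.getProperty(var);
        if (val == null) {
          val = props.getProperty(var);
        }
        if (val == null) {
          return eval;                               // ${var} stays literal: var is unbound
        }
        eval = eval.substring(0, match.start()) + val + eval.substring(match.end());
      }
      return eval;                                   // give up after MAX_SUBST passes
    }

    public static void main(String[] args) {
      Properties p = new Properties();
      p.setProperty("base", "/tmp");
      System.out.println(substituteVars("${base}/data", p));   // /tmp/data
    }
  }
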
+ 3 - 3
src/java/org/apache/hadoop/dfs/Block.java

@@ -112,7 +112,7 @@ class Block implements Writable, Comparable {
   public void readFields(DataInput in) throws IOException {
     this.blkid = in.readLong();
     this.len = in.readLong();
-    if( len < 0 ) {
+    if (len < 0) {
       throw new IOException("Unexpected block size: " + len);
     }
   }
@@ -122,9 +122,9 @@ class Block implements Writable, Comparable {
   /////////////////////////////////////
   public int compareTo(Object o) {
     Block b = (Block) o;
-    if ( blkid < b.blkid ) {
+    if (blkid < b.blkid) {
       return -1;
-    } else if ( blkid == b.blkid ) {
+    } else if (blkid == b.blkid) {
       return 0;
     } else {
       return 1;

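Aside: the Block.compareTo() hunk keeps an explicit three-way branch on the long block ids. The shape matters: the tempting subtraction shortcut, (int)(blkid - b.blkid), truncates and can misreport the ordering. A tiny demonstration:

  public class CompareDemo {
    static int compareLongs(long a, long b) {
      if (a < b) {
        return -1;
      } else if (a == b) {
        return 0;
      } else {
        return 1;
      }
    }

    public static void main(String[] args) {
      System.out.println(compareLongs(0L, 1L << 32));    // -1, correct
      System.out.println((int) (0L - (1L << 32)));       // 0, i.e. wrongly "equal"
    }
  }
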
+ 70 - 70
src/java/org/apache/hadoop/dfs/BlockCommand.java

@@ -24,10 +24,10 @@ class DatanodeCommand implements Writable {
   DatanodeProtocol.DataNodeAction action;
   
   public DatanodeCommand() {
-    this( DatanodeProtocol.DataNodeAction.DNA_UNKNOWN );
+    this(DatanodeProtocol.DataNodeAction.DNA_UNKNOWN);
   }
   
-  public DatanodeCommand( DatanodeProtocol.DataNodeAction action ) {
+  public DatanodeCommand(DatanodeProtocol.DataNodeAction action) {
     this.action = action;
   }
 
@@ -43,12 +43,12 @@ class DatanodeCommand implements Writable {
   }
 
   public void write(DataOutput out) throws IOException {
-    WritableUtils.writeEnum( out, action );
+    WritableUtils.writeEnum(out, action);
   }
   
   public void readFields(DataInput in) throws IOException {
     this.action = (DatanodeProtocol.DataNodeAction)
-      WritableUtils.readEnum( in, DatanodeProtocol.DataNodeAction.class );
+      WritableUtils.readEnum(in, DatanodeProtocol.DataNodeAction.class);
   }
 }
 
@@ -62,81 +62,81 @@ class DatanodeCommand implements Writable {
  * @author Mike Cafarella
  ****************************************************/
 class BlockCommand extends DatanodeCommand {
-    Block blocks[];
-    DatanodeInfo targets[][];
+  Block blocks[];
+  DatanodeInfo targets[][];
 
-    public BlockCommand() {}
+  public BlockCommand() {}
 
-    /**
-     * Create BlockCommand for transferring blocks to another datanode
-     * @param blocks    blocks to be transferred 
-     * @param targets   nodes to transfer
-     */
-    public BlockCommand(Block blocks[], DatanodeInfo targets[][]) {
-      super(  DatanodeProtocol.DataNodeAction.DNA_TRANSFER );
-      this.blocks = blocks;
-      this.targets = targets;
-    }
+  /**
+   * Create BlockCommand for transferring blocks to another datanode
+   * @param blocks    blocks to be transferred 
+   * @param targets   nodes to transfer
+   */
+  public BlockCommand(Block blocks[], DatanodeInfo targets[][]) {
+    super(DatanodeProtocol.DataNodeAction.DNA_TRANSFER);
+    this.blocks = blocks;
+    this.targets = targets;
+  }
 
-    /**
-     * Create BlockCommand for block invalidation
-     * @param blocks  blocks to invalidate
-     */
-    public BlockCommand(Block blocks[]) {
-      super( DatanodeProtocol.DataNodeAction.DNA_INVALIDATE );
-      this.blocks = blocks;
-      this.targets = new DatanodeInfo[0][];
-    }
+  /**
+   * Create BlockCommand for block invalidation
+   * @param blocks  blocks to invalidate
+   */
+  public BlockCommand(Block blocks[]) {
+    super(DatanodeProtocol.DataNodeAction.DNA_INVALIDATE);
+    this.blocks = blocks;
+    this.targets = new DatanodeInfo[0][];
+  }
 
-    public Block[] getBlocks() {
-        return blocks;
-    }
+  public Block[] getBlocks() {
+    return blocks;
+  }
 
-    public DatanodeInfo[][] getTargets() {
-        return targets;
-    }
+  public DatanodeInfo[][] getTargets() {
+    return targets;
+  }
 
-    ///////////////////////////////////////////
-    // Writable
-    ///////////////////////////////////////////
-    static {                                      // register a ctor
-      WritableFactories.setFactory
-        (BlockCommand.class,
-         new WritableFactory() {
-           public Writable newInstance() { return new BlockCommand(); }
-         });
-    }
+  ///////////////////////////////////////////
+  // Writable
+  ///////////////////////////////////////////
+  static {                                      // register a ctor
+    WritableFactories.setFactory
+      (BlockCommand.class,
+       new WritableFactory() {
+         public Writable newInstance() { return new BlockCommand(); }
+       });
+  }
 
-    public void write(DataOutput out) throws IOException {
-        super.write( out );
-        out.writeInt(blocks.length);
-        for (int i = 0; i < blocks.length; i++) {
-            blocks[i].write(out);
-        }
-        out.writeInt(targets.length);
-        for (int i = 0; i < targets.length; i++) {
-            out.writeInt(targets[i].length);
-            for (int j = 0; j < targets[i].length; j++) {
-                targets[i][j].write(out);
-            }
-        }
+  public void write(DataOutput out) throws IOException {
+    super.write(out);
+    out.writeInt(blocks.length);
+    for (int i = 0; i < blocks.length; i++) {
+      blocks[i].write(out);
+    }
+    out.writeInt(targets.length);
+    for (int i = 0; i < targets.length; i++) {
+      out.writeInt(targets[i].length);
+      for (int j = 0; j < targets[i].length; j++) {
+        targets[i][j].write(out);
+      }
     }
+  }
 
-    public void readFields(DataInput in) throws IOException {
-        super.readFields( in );
-        this.blocks = new Block[in.readInt()];
-        for (int i = 0; i < blocks.length; i++) {
-            blocks[i] = new Block();
-            blocks[i].readFields(in);
-        }
+  public void readFields(DataInput in) throws IOException {
+    super.readFields(in);
+    this.blocks = new Block[in.readInt()];
+    for (int i = 0; i < blocks.length; i++) {
+      blocks[i] = new Block();
+      blocks[i].readFields(in);
+    }
 
-        this.targets = new DatanodeInfo[in.readInt()][];
-        for (int i = 0; i < targets.length; i++) {
-            this.targets[i] = new DatanodeInfo[in.readInt()];
-            for (int j = 0; j < targets[i].length; j++) {
-                targets[i][j] = new DatanodeInfo();
-                targets[i][j].readFields(in);
-            }
-        }
+    this.targets = new DatanodeInfo[in.readInt()][];
+    for (int i = 0; i < targets.length; i++) {
+      this.targets[i] = new DatanodeInfo[in.readInt()];
+      for (int j = 0; j < targets[i].length; j++) {
+        targets[i][j] = new DatanodeInfo();
+        targets[i][j].readFields(in);
+      }
     }
+  }
 }

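Aside: the re-indented write()/readFields() pair above follows the standard Writable contract, a length first and then the elements, mirrored exactly on the read side. A dependency-free sketch of that round trip:

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;

  public class WritableDemo {
    public static void main(String[] args) throws IOException {
      long[] ids = { 1L, 2L, 3L };
      ByteArrayOutputStream bos = new ByteArrayOutputStream();
      DataOutputStream out = new DataOutputStream(bos);
      out.writeInt(ids.length);                  // count first...
      for (long id : ids) {
        out.writeLong(id);                       // ...then each element
      }

      DataInputStream in = new DataInputStream(
        new ByteArrayInputStream(bos.toByteArray()));
      long[] back = new long[in.readInt()];      // read back in the same order
      for (int i = 0; i < back.length; i++) {
        back[i] = in.readLong();
      }
      System.out.println(back.length + " ids restored, first = " + back[0]);
    }
  }
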
+ 14 - 14
src/java/org/apache/hadoop/dfs/ClientProtocol.java

@@ -61,12 +61,12 @@ interface ClientProtocol extends VersionedProtocol {
    * create multi-block files must also use reportWrittenBlock()
    * and addBlock().
    */
-  public LocatedBlock create( String src, 
-                              String clientName, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize
-                              ) throws IOException;
+  public LocatedBlock create(String src, 
+                             String clientName, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize
+                             ) throws IOException;
 
   /**
    * Set replication for an existing file.
@@ -83,9 +83,9 @@ interface ClientProtocol extends VersionedProtocol {
    *         false if file does not exist or is a directory
    * @author shv
    */
-  public boolean setReplication( String src, 
-                                 short replication
-                                 ) throws IOException;
+  public boolean setReplication(String src, 
+                                short replication
+                                ) throws IOException;
 
   /**
    * If the client has not yet called reportWrittenBlock(), it can
@@ -261,7 +261,7 @@ interface ClientProtocol extends VersionedProtocol {
    * <p>
    * Safe mode is entered automatically at name node startup.
    * Safe mode can also be entered manually using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_GET )}.
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}.
    * <p>
    * At startup the name node accepts data node reports collecting
    * information about block locations.
@@ -277,11 +277,11 @@ interface ClientProtocol extends VersionedProtocol {
    * Then the name node leaves safe mode.
    * <p>
    * If safe mode is turned on manually using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_ENTER )}
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
    * then the name node stays in safe mode until it is manually turned off
-   * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_LEAVE )}.
+   * using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
    * Current state of the name node can be verified using
-   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode( SafeModeAction.SAFEMODE_GET )}
+   * {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
    * <h4>Configuration parameters:</h4>
    * <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
    * <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
@@ -304,7 +304,7 @@ interface ClientProtocol extends VersionedProtocol {
    * @throws IOException
    * @author Konstantin Shvachko
    */
-  public boolean setSafeMode( FSConstants.SafeModeAction action ) throws IOException;
+  public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;
 
   /**
    * Tells the namenode to reread the hosts and exclude files. 

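Aside: the safe-mode javadoc above documents a single entry point, setSafeMode(action), used for query, enter, and leave alike. A hypothetical usage sketch; the NameNodeHandle interface below is a stand-in for a real ClientProtocol reference, and the return-value convention (true means the node is in safe mode) is taken from the javadoc:

  public class SafeModeDemo {
    enum SafeModeAction { SAFEMODE_LEAVE, SAFEMODE_ENTER, SAFEMODE_GET }

    interface NameNodeHandle {
      boolean setSafeMode(SafeModeAction action);   // true == in safe mode
    }

    static boolean ensureWritable(NameNodeHandle namenode) {
      if (namenode.setSafeMode(SafeModeAction.SAFEMODE_GET)) {       // query only
        return !namenode.setSafeMode(SafeModeAction.SAFEMODE_LEAVE); // manual exit
      }
      return true;                                                   // already writable
    }

    public static void main(String[] args) {
      NameNodeHandle fake = new NameNodeHandle() {
        boolean safe = true;
        public boolean setSafeMode(SafeModeAction action) {
          if (action == SafeModeAction.SAFEMODE_LEAVE) { safe = false; }
          if (action == SafeModeAction.SAFEMODE_ENTER) { safe = true; }
          return safe;
        }
      };
      System.out.println("writable: " + ensureWritable(fake));       // writable: true
    }
  }
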
+ 37 - 37
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -216,10 +216,10 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite
-                              ) throws IOException {
-    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite
+                             ) throws IOException {
+    return create(src, overwrite, defaultReplication, defaultBlockSize, null);
   }
     
   /**
@@ -231,11 +231,11 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite,
-                              Progressable progress
-                              ) throws IOException {
-    return create( src, overwrite, defaultReplication, defaultBlockSize, null);
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite,
+                             Progressable progress
+                             ) throws IOException {
+    return create(src, overwrite, defaultReplication, defaultBlockSize, null);
   }
     
   /**
@@ -248,11 +248,11 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize
-                              ) throws IOException {
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize
+                             ) throws IOException {
     return create(src, overwrite, replication, blockSize, null);
   }
 
@@ -267,12 +267,12 @@ class DFSClient implements FSConstants {
    * @return output stream
    * @throws IOException
    */
-  public OutputStream create( UTF8 src, 
-                              boolean overwrite, 
-                              short replication,
-                              long blockSize,
-                              Progressable progress
-                              ) throws IOException {
+  public OutputStream create(UTF8 src, 
+                             boolean overwrite, 
+                             short replication,
+                             long blockSize,
+                             Progressable progress
+                             ) throws IOException {
     checkOpen();
     OutputStream result = new DFSOutputStream(src, overwrite, 
                                               replication, blockSize, progress);
@@ -360,8 +360,8 @@ class DFSClient implements FSConstants {
    * 
    * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
    */
-  public boolean setSafeMode( SafeModeAction action ) throws IOException {
-    return namenode.setSafeMode( action );
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    return namenode.setSafeMode(action);
   }
 
   /**
@@ -405,9 +405,9 @@ class DFSClient implements FSConstants {
   public void lock(UTF8 src, boolean exclusive) throws IOException {
     long start = System.currentTimeMillis();
     boolean hasLock = false;
-    while (! hasLock) {
+    while (!hasLock) {
       hasLock = namenode.obtainLock(src.toString(), clientName, exclusive);
-      if (! hasLock) {
+      if (!hasLock) {
         try {
           Thread.sleep(400);
           if (System.currentTimeMillis() - start > 5000) {
@@ -425,9 +425,9 @@ class DFSClient implements FSConstants {
    */
   public void release(UTF8 src) throws IOException {
     boolean hasReleased = false;
-    while (! hasReleased) {
+    while (!hasReleased) {
       hasReleased = namenode.releaseLock(src.toString(), clientName);
-      if (! hasReleased) {
+      if (!hasReleased) {
         LOG.info("Could not release.  Retrying...");
         try {
           Thread.sleep(2000);
@@ -464,7 +464,7 @@ class DFSClient implements FSConstants {
       while (running) {
         if (System.currentTimeMillis() - lastRenewed > (LEASE_SOFTLIMIT_PERIOD / 2)) {
           try {
-            if( pendingCreates.size() > 0 )
+            if (pendingCreates.size() > 0)
               namenode.renewLease(clientName);
             lastRenewed = System.currentTimeMillis();
           } catch (IOException ie) {
@@ -538,7 +538,7 @@ class DFSClient implements FSConstants {
 
       if (oldBlocks != null) {
         for (int i = 0; i < oldBlocks.length; i++) {
-          if (! oldBlocks[i].equals(newBlocks[i])) {
+          if (!oldBlocks[i].equals(newBlocks[i])) {
             throw new IOException("Blocklist for " + src + " has changed!");
           }
         }
@@ -912,7 +912,7 @@ class DFSClient implements FSConstants {
       deadNodes.add(currentNode);
       DatanodeInfo oldNode = currentNode;
       DatanodeInfo newNode = blockSeekTo(targetPos);
-      if ( !markedDead ) {
+      if (!markedDead) {
         /* remove it from deadNodes. blockSeekTo could have cleared 
          * deadNodes and added currentNode again. Thats ok. */
         deadNodes.remove(oldNode);
@@ -1037,7 +1037,7 @@ class DFSClient implements FSConstants {
      * filedescriptor that we don't own.
      */
     private void closeBackupStream() throws IOException {
-      if ( backupStream != null ) {
+      if (backupStream != null) {
         OutputStream stream = backupStream;
         backupStream = null;
         stream.close();
@@ -1047,7 +1047,7 @@ class DFSClient implements FSConstants {
      * twice could result in deleting a file that we should not.
      */
     private void deleteBackupFile() {
-      if ( backupFile != null ) {
+      if (backupFile != null) {
         File file = backupFile;
         backupFile = null;
         file.delete();
@@ -1081,8 +1081,8 @@ class DFSClient implements FSConstants {
         }
 
         block = lb.getBlock();
-        if ( block.getNumBytes() < bytesWrittenToBlock ) {
-          block.setNumBytes( bytesWrittenToBlock );
+        if (block.getNumBytes() < bytesWrittenToBlock) {
+          block.setNumBytes(bytesWrittenToBlock);
         }
         DatanodeInfo nodes[] = lb.getLocations();
 
@@ -1270,9 +1270,9 @@ class DFSClient implements FSConstants {
       int workingPos = Math.min(pos, maxPos);
             
       if (workingPos > 0) {
-        if ( backupStream == null ) {
-          throw new IOException( "Trying to write to backupStream " +
-                                 "but it already closed or not open");
+        if (backupStream == null) {
+          throw new IOException("Trying to write to backupStream " +
+                                "but it already closed or not open");
         }
         //
         // To the local block backup, write just the bytes
@@ -1417,7 +1417,7 @@ class DFSClient implements FSConstants {
 
         long localstart = System.currentTimeMillis();
         boolean fileComplete = false;
-        while (! fileComplete) {
+        while (!fileComplete) {
           fileComplete = namenode.complete(src.toString(), clientName.toString());
           if (!fileComplete) {
             try {

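Aside: the lock()/release() hunks keep the same poll-and-sleep shape: loop until the namenode grants the request, sleep between attempts, and complain if it drags on. A generic self-contained sketch of that shape (the 5-second threshold echoes the code above; everything else is illustrative):

  public class RetryDemo {
    interface Attempt {
      boolean tryOnce() throws Exception;
    }

    static void retryUntilTrue(Attempt attempt, long sleepMs) throws Exception {
      long start = System.currentTimeMillis();
      boolean done = false;
      while (!done) {
        done = attempt.tryOnce();
        if (!done) {
          if (System.currentTimeMillis() - start > 5000) {
            System.err.println("waiting a long time to acquire...");
          }
          Thread.sleep(sleepMs);
        }
      }
    }

    public static void main(String[] args) throws Exception {
      final int[] calls = { 0 };
      retryUntilTrue(new Attempt() {
        public boolean tryOnce() {
          return ++calls[0] >= 3;                // succeeds on the third try
        }
      }, 10);
      System.out.println("acquired after " + calls[0] + " attempts");
    }
  }
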
+ 2 - 2
src/java/org/apache/hadoop/dfs/DFSFileInfo.java

@@ -52,10 +52,10 @@ class DFSFileInfo implements Writable {
   /**
    * Create DFSFileInfo by file INode 
    */
-  public DFSFileInfo( FSDirectory.INode node ) {
+  public DFSFileInfo(FSDirectory.INode node) {
     this.path = new UTF8(node.computeName());
     this.isDir = node.isDir();
-    if( isDir ) {
+    if (isDir) {
       this.len = 0;
       this.contentsLen = node.computeContentsLength();
     } else 

+ 3 - 3
src/java/org/apache/hadoop/dfs/DFSck.java

@@ -108,7 +108,7 @@ public class DFSck extends ToolBase {
     URLConnection connection = path.openConnection();
     InputStream stream = connection.getInputStream();
     InputStreamReader input =
-        new InputStreamReader(stream, "UTF-8");
+      new InputStreamReader(stream, "UTF-8");
     try {
       int c = input.read();
       while (c != -1) {
@@ -122,7 +122,7 @@ public class DFSck extends ToolBase {
   }
 
   public static void main(String[] args) throws Exception {
-      int res = new DFSck().doMain(new Configuration(), args);
-      System.exit(res);
+    int res = new DFSck().doMain(new Configuration(), args);
+    System.exit(res);
   }
 }

+ 99 - 99
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -193,19 +193,19 @@ public class DataNode implements FSConstants, Runnable {
    * Create the DataNode given a configuration and an array of dataDirs.
    * 'dataDirs' is where the blocks are stored.
    */
-  DataNode( Configuration conf, 
-            AbstractList<File> dataDirs ) throws IOException {
+  DataNode(Configuration conf, 
+           AbstractList<File> dataDirs) throws IOException {
     try {
-      startDataNode( conf, dataDirs );
+      startDataNode(conf, dataDirs);
     } catch (IOException ie) {
       shutdown();
       throw ie;
     }
   }
     
-  void startDataNode( Configuration conf, 
-                      AbstractList<File> dataDirs
-                      ) throws IOException {
+  void startDataNode(Configuration conf, 
+                     AbstractList<File> dataDirs
+                     ) throws IOException {
     // use configured nameserver & interface to get local hostname
     machineName = DNS.getDefaultHost(
                                      conf.get("dfs.datanode.dns.interface","default"),
@@ -223,14 +223,14 @@ public class DataNode implements FSConstants, Runnable {
     NamespaceInfo nsInfo = handshake();
 
     // read storage info, lock data dirs and transition fs state if necessary
-    StartupOption startOpt = (StartupOption)conf.get( "dfs.datanode.startup", 
-                                                      StartupOption.REGULAR );
+    StartupOption startOpt = (StartupOption)conf.get("dfs.datanode.startup", 
+                                                     StartupOption.REGULAR);
     assert startOpt != null : "Startup option must be set.";
     storage = new DataStorage();
-    storage.recoverTransitionRead( nsInfo, dataDirs, startOpt );
+    storage.recoverTransitionRead(nsInfo, dataDirs, startOpt);
       
     // initialize data node internal structure
-    this.data = new FSDataset( storage, conf );
+    this.data = new FSDataset(storage, conf);
       
     // find free port
     ServerSocket ss = null;
@@ -238,7 +238,7 @@ public class DataNode implements FSConstants, Runnable {
     String bindAddress = conf.get("dfs.datanode.bindAddress", "0.0.0.0");
     while (ss == null) {
       try {
-        ss = new ServerSocket(tmpPort,0,InetAddress.getByName(bindAddress));
+        ss = new ServerSocket(tmpPort, 0, InetAddress.getByName(bindAddress));
         LOG.info("Opened server at " + tmpPort);
       } catch (IOException ie) {
         LOG.info("Could not open server at " + tmpPort + ", trying new port");
@@ -246,10 +246,10 @@ public class DataNode implements FSConstants, Runnable {
       }
     }
     // construct registration
-    this.dnRegistration = new DatanodeRegistration( 
+    this.dnRegistration = new DatanodeRegistration(
                                                    machineName + ":" + tmpPort, 
                                                    -1,   // info port determined later
-                                                   storage );
+                                                   storage);
       
     this.dataXceiveServer = new Daemon(new DataXceiveServer(ss));
 
@@ -268,9 +268,9 @@ public class DataNode implements FSConstants, Runnable {
     this.infoServer.start();
     this.dnRegistration.infoPort = this.infoServer.getPort();
     // get network location
-    this.networkLoc = conf.get( "dfs.datanode.rack" );
-    if( networkLoc == null )  // exec network script or set the default rack
-      networkLoc = getNetworkLoc( conf );
+    this.networkLoc = conf.get("dfs.datanode.rack");
+    if (networkLoc == null)  // exec network script or set the default rack
+      networkLoc = getNetworkLoc(conf);
     // register datanode
     register();
     datanodeObject = this;
@@ -282,7 +282,7 @@ public class DataNode implements FSConstants, Runnable {
       try {
         nsInfo = namenode.versionRequest();
         break;
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
         try {
           Thread.sleep(1000);
@@ -291,18 +291,18 @@ public class DataNode implements FSConstants, Runnable {
     }
     String errorMsg = null;
     // verify build version
-    if( ! nsInfo.getBuildVersion().equals( Storage.getBuildVersion() )) {
+    if (!nsInfo.getBuildVersion().equals(Storage.getBuildVersion())) {
       errorMsg = "Incompatible build versions: namenode BV = " 
         + nsInfo.getBuildVersion() + "; datanode BV = "
         + Storage.getBuildVersion();
-      LOG.fatal( errorMsg );
+      LOG.fatal(errorMsg);
       try {
-        namenode.errorReport( dnRegistration,
-                              DatanodeProtocol.NOTIFY, errorMsg );
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+        namenode.errorReport(dnRegistration,
+                             DatanodeProtocol.NOTIFY, errorMsg);
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
       }
-      throw new IOException( errorMsg );
+      throw new IOException(errorMsg);
     }
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
@@ -340,21 +340,21 @@ public class DataNode implements FSConstants, Runnable {
    * @throws IOException
    */
   private void register() throws IOException {
-    while( shouldRun ) {
+    while(shouldRun) {
       try {
         // reset name to machineName. Mainly for web interface.
         dnRegistration.name = machineName + ":" + dnRegistration.getPort();
-        dnRegistration = namenode.register( dnRegistration, networkLoc );
+        dnRegistration = namenode.register(dnRegistration, networkLoc);
         break;
-      } catch( SocketTimeoutException e ) {  // namenode is busy
+      } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());
         try {
           Thread.sleep(1000);
         } catch (InterruptedException ie) {}
       }
     }
-    if( storage.getStorageID().equals("") ) {
-      storage.setStorageID( dnRegistration.getStorageID());
+    if (storage.getStorageID().equals("")) {
+      storage.setStorageID(dnRegistration.getStorageID());
       storage.writeAll();
     }
   }
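
Both the handshake and register() hunks above rely on the same retry idiom: loop until the call succeeds, treating a SocketTimeoutException as "namenode is busy" and backing off for a second. A minimal, self-contained sketch of that idiom, assuming a hypothetical RemoteCall interface introduced only for illustration:

    import java.net.SocketTimeoutException;

    public class RetrySketch {
      interface RemoteCall {
        void invoke() throws Exception;
      }

      // Keep calling until the remote side answers; a timeout means the
      // server is busy, so back off briefly and try again.
      static void callUntilAnswered(RemoteCall call) throws Exception {
        while (true) {
          try {
            call.invoke();        // e.g. versionRequest() or register()
            break;                // success: stop retrying
          } catch (SocketTimeoutException e) {
            try {
              Thread.sleep(1000); // namenode is busy; wait one second
            } catch (InterruptedException ie) {}
          }
        }
      }
    }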
@@ -390,12 +390,12 @@ public class DataNode implements FSConstants, Runnable {
     }
   }
 
-  void handleDiskError( String errMsgr ) {
-    LOG.warn( "DataNode is shutting down.\n" + errMsgr );
+  void handleDiskError(String errMsgr) {
+    LOG.warn("DataNode is shutting down.\n" + errMsgr);
     try {
       namenode.errorReport(
                            dnRegistration, DatanodeProtocol.DISK_ERROR, errMsgr);
-    } catch( IOException ignored) {              
+    } catch(IOException ignored) {              
     }
     shutdown();
   }
@@ -438,20 +438,20 @@ public class DataNode implements FSConstants, Runnable {
           // -- Total capacity
           // -- Bytes remaining
           //
-          DatanodeCommand cmd = namenode.sendHeartbeat( dnRegistration, 
-                                                        data.getCapacity(), 
-                                                        data.getRemaining(), 
-                                                        xmitsInProgress,
-                                                        xceiverCount.getValue());
+          DatanodeCommand cmd = namenode.sendHeartbeat(dnRegistration, 
+                                                       data.getCapacity(), 
+                                                       data.getRemaining(), 
+                                                       xmitsInProgress,
+                                                       xceiverCount.getValue());
           //LOG.info("Just sent heartbeat, with name " + localName);
           lastHeartbeat = now;
-          if( ! processCommand( cmd ) )
+          if (!processCommand(cmd))
             continue;
         }
             
         // check if there are newly received blocks
         Block [] blockArray=null;
-        synchronized( receivedBlockList ) {
+        synchronized(receivedBlockList) {
           if (receivedBlockList.size() > 0) {
             //
             // Send newly-received blockids to namenode
@@ -459,8 +459,8 @@ public class DataNode implements FSConstants, Runnable {
             blockArray = receivedBlockList.toArray(new Block[receivedBlockList.size()]);
           }
         }
-        if( blockArray != null ) {
-          namenode.blockReceived( dnRegistration, blockArray );
+        if (blockArray != null) {
+          namenode.blockReceived(dnRegistration, blockArray);
           synchronized (receivedBlockList) {
             for(Block b: blockArray) {
               receivedBlockList.remove(b);
@@ -475,9 +475,9 @@ public class DataNode implements FSConstants, Runnable {
           // Get back a list of local block(s) that are obsolete
           // and can be safely GC'ed.
           //
-          DatanodeCommand cmd = namenode.blockReport( dnRegistration,
-                                                      data.getBlockReport());
-          processCommand( cmd );
+          DatanodeCommand cmd = namenode.blockReport(dnRegistration,
+                                                     data.getBlockReport());
+          processCommand(cmd);
           lastBlockReport = now;
         }
             
@@ -486,7 +486,7 @@ public class DataNode implements FSConstants, Runnable {
         // or work arrives, and then iterate again.
         //
         long waitTime = heartBeatInterval - (System.currentTimeMillis() - lastHeartbeat);
-        synchronized( receivedBlockList ) {
+        synchronized(receivedBlockList) {
           if (waitTime > 0 && receivedBlockList.size() == 0) {
             try {
               receivedBlockList.wait(waitTime);
@@ -497,12 +497,12 @@ public class DataNode implements FSConstants, Runnable {
       } catch(DiskErrorException e) {
         handleDiskError(e.getLocalizedMessage());
         return;
-      } catch( RemoteException re ) {
+      } catch(RemoteException re) {
         String reClass = re.getClassName();
-        if( UnregisteredDatanodeException.class.getName().equals( reClass ) ||
-            DisallowedDatanodeException.class.getName().equals( reClass )) {
-          LOG.warn( "DataNode is shutting down: " + 
-                    StringUtils.stringifyException(re));
+        if (UnregisteredDatanodeException.class.getName().equals(reClass) ||
+            DisallowedDatanodeException.class.getName().equals(reClass)) {
+          LOG.warn("DataNode is shutting down: " + 
+                   StringUtils.stringifyException(re));
           shutdown();
           return;
         }
@@ -519,16 +519,16 @@ public class DataNode implements FSConstants, Runnable {
     * @return true if further processing may be required, or false otherwise. 
      * @throws IOException
      */
-  private boolean processCommand( DatanodeCommand cmd ) throws IOException {
-    if( cmd == null )
+  private boolean processCommand(DatanodeCommand cmd) throws IOException {
+    if (cmd == null)
       return true;
-    switch( cmd.action ) {
+    switch(cmd.action) {
     case DNA_TRANSFER:
       //
       // Send a copy of a block to another datanode
       //
       BlockCommand bcmd = (BlockCommand)cmd;
-      transferBlocks( bcmd.getBlocks(), bcmd.getTargets() );
+      transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
       break;
     case DNA_INVALIDATE:
       //
@@ -553,21 +553,21 @@ public class DataNode implements FSConstants, Runnable {
       storage.finalizeUpgrade();
       break;
     default:
-      LOG.warn( "Unknown DatanodeCommand action: " + cmd.action);
+      LOG.warn("Unknown DatanodeCommand action: " + cmd.action);
     }
     return true;
   }
     
-  private void transferBlocks(  Block blocks[], 
-                                DatanodeInfo xferTargets[][] 
-                                ) throws IOException {
+  private void transferBlocks( Block blocks[], 
+                               DatanodeInfo xferTargets[][] 
+                               ) throws IOException {
     for (int i = 0; i < blocks.length; i++) {
       if (!data.isValidBlock(blocks[i])) {
         String errStr = "Can't send invalid block " + blocks[i];
         LOG.info(errStr);
-        namenode.errorReport( dnRegistration, 
-                              DatanodeProtocol.INVALID_BLOCK, 
-                              errStr );
+        namenode.errorReport(dnRegistration, 
+                             DatanodeProtocol.INVALID_BLOCK, 
+                             errStr);
         break;
       }
       if (xferTargets[i].length > 0) {
@@ -689,7 +689,7 @@ public class DataNode implements FSConstants, Runnable {
         //
         // Write filelen of -1 if error
         //
-        if (! data.isValidBlock(b)) {
+        if (!data.isValidBlock(b)) {
           out.writeLong(-1);
         } else {
           //
@@ -1130,11 +1130,11 @@ public class DataNode implements FSConstants, Runnable {
   /** Start a single datanode daemon and wait for it to finish.
    *  If this thread is specifically interrupted, it will stop waiting.
    */
-  static DataNode createDataNode( String args[],
-                                  Configuration conf ) throws IOException {
-    if( conf == null )
+  static DataNode createDataNode(String args[],
+                                 Configuration conf) throws IOException {
+    if (conf == null)
       conf = new Configuration();
-    if( ! parseArguments( args, conf )) {
+    if (!parseArguments(args, conf)) {
       printUsage();
       return null;
     }
@@ -1160,21 +1160,21 @@ public class DataNode implements FSConstants, Runnable {
    * no directory from this directory list can be created.
    * @throws IOException
    */
-  static DataNode makeInstance( String[] dataDirs, Configuration conf )
+  static DataNode makeInstance(String[] dataDirs, Configuration conf)
     throws IOException {
     ArrayList<File> dirs = new ArrayList<File>();
     for (int i = 0; i < dataDirs.length; i++) {
       File data = new File(dataDirs[i]);
       try {
-        DiskChecker.checkDir( data );
+        DiskChecker.checkDir(data);
         dirs.add(data);
-      } catch( DiskErrorException e ) {
-        LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage() );
+      } catch(DiskErrorException e) {
+        LOG.warn("Invalid directory in dfs.data.dir: " + e.getMessage());
       }
     }
-    if( dirs.size() > 0 ) 
+    if (dirs.size() > 0) 
       return new DataNode(conf, dirs);
-    LOG.error("All directories in dfs.data.dir are invalid." );
+    LOG.error("All directories in dfs.data.dir are invalid.");
     return null;
   }
 
@@ -1199,45 +1199,45 @@ public class DataNode implements FSConstants, Runnable {
   * @return false if the passed arguments are incorrect
    */
   private static boolean parseArguments(String args[], 
-                                        Configuration conf ) {
+                                        Configuration conf) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
     String networkLoc = null;
-    for( int i=0; i < argsLen; i++ ) {
+    for(int i=0; i < argsLen; i++) {
       String cmd = args[i];
-      if( "-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd) ) {
-        if( i==args.length-1 )
+      if ("-r".equalsIgnoreCase(cmd) || "--rack".equalsIgnoreCase(cmd)) {
+        if (i==args.length-1)
           return false;
         networkLoc = args[++i];
-        if( networkLoc.startsWith("-") )
+        if (networkLoc.startsWith("-"))
           return false;
-      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+      } else if ("-rollback".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
-      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+      } else if ("-regular".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
       } else
         return false;
     }
-    if( networkLoc != null )
-      conf.set( "dfs.datanode.rack", NodeBase.normalize( networkLoc ));
-    conf.setObject( "dfs.datanode.startup", startOpt );
+    if (networkLoc != null)
+      conf.set("dfs.datanode.rack", NodeBase.normalize(networkLoc));
+    conf.setObject("dfs.datanode.startup", startOpt);
     return true;
   }
 
   /* Get the network location by running a script configured in conf */
-  private static String getNetworkLoc( Configuration conf ) 
+  private static String getNetworkLoc(Configuration conf) 
     throws IOException {
-    String locScript = conf.get("dfs.network.script" );
-    if( locScript == null ) 
+    String locScript = conf.get("dfs.network.script");
+    if (locScript == null) 
       return NetworkTopology.DEFAULT_RACK;
 
-    LOG.info( "Starting to run script to get datanode network location");
-    Process p = Runtime.getRuntime().exec( locScript );
+    LOG.info("Starting to run script to get datanode network location");
+    Process p = Runtime.getRuntime().exec(locScript);
     StringBuffer networkLoc = new StringBuffer();
     final BufferedReader inR = new BufferedReader(
-                                                  new InputStreamReader(p.getInputStream() ) );
+                                                  new InputStreamReader(p.getInputStream()));
     final BufferedReader errR = new BufferedReader(
-                                                   new InputStreamReader( p.getErrorStream() ) );
+                                                   new InputStreamReader(p.getErrorStream()));
 
     // read & log any error messages from the running script
     Thread errThread = new Thread() {
@@ -1248,7 +1248,7 @@ public class DataNode implements FSConstants, Runnable {
               LOG.warn("Network script error: "+errLine);
               errLine = errR.readLine();
             }
-          } catch( IOException e) {
+          } catch(IOException e) {
                     
           }
         }
@@ -1258,32 +1258,32 @@ public class DataNode implements FSConstants, Runnable {
             
       // fetch output from the process
       String line = inR.readLine();
-      while( line != null ) {
-        networkLoc.append( line );
+      while(line != null) {
+        networkLoc.append(line);
         line = inR.readLine();
       }
       try {
         // wait for the process to finish
         int returnVal = p.waitFor();
         // check the exit code
-        if( returnVal != 0 ) {
+        if (returnVal != 0) {
           throw new IOException("Process exits with nonzero status: "+locScript);
         }
       } catch (InterruptedException e) {
-        throw new IOException( e.getMessage() );
+        throw new IOException(e.getMessage());
       } finally {
         try {
           // make sure that the error thread exits
           errThread.join();
         } catch (InterruptedException je) {
-          LOG.warn( StringUtils.stringifyException(je));
+          LOG.warn(StringUtils.stringifyException(je));
         }
       }
     } finally {
       // close in & error streams
       try {
         inR.close();
-      } catch ( IOException ine ) {
+      } catch (IOException ine) {
         throw ine;
       } finally {
         errR.close();
@@ -1297,11 +1297,11 @@ public class DataNode implements FSConstants, Runnable {
    */
   public static void main(String args[]) {
     try {
-      DataNode datanode = createDataNode( args, null );
-      if( datanode != null )
+      DataNode datanode = createDataNode(args, null);
+      if (datanode != null)
         datanode.join();
-    } catch ( Throwable e ) {
-      LOG.error( StringUtils.stringifyException( e ) );
+    } catch (Throwable e) {
+      LOG.error(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }
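
The heartbeat-loop hunks above compute how long to sleep between heartbeats: the remainder of the interval, waking early when a newly received block is queued. A runnable toy illustrating just that timing math; the three-second interval is an assumption for the demo, not the daemon's configured value:

    public class HeartbeatTimingDemo {
      public static void main(String[] args) throws InterruptedException {
        final long heartBeatInterval = 3000L; // assumed value, for illustration
        final Object receivedBlockList = new Object();
        long lastHeartbeat = 0;
        for (int i = 0; i < 3; i++) {
          long now = System.currentTimeMillis();
          if (now - lastHeartbeat > heartBeatInterval) {
            System.out.println("heartbeat at " + now);
            lastHeartbeat = now;
          }
          // Sleep only for what is left of the interval; a notify on
          // receivedBlockList would wake the loop early.
          long waitTime = heartBeatInterval
            - (System.currentTimeMillis() - lastHeartbeat);
          synchronized (receivedBlockList) {
            if (waitTime > 0) receivedBlockList.wait(waitTime);
          }
        }
      }
    }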

+ 156 - 156
src/java/org/apache/hadoop/dfs/DataStorage.java

@@ -29,17 +29,17 @@ class DataStorage extends Storage {
   private String storageID;
 
   DataStorage() {
-    super( NodeType.DATA_NODE );
+    super(NodeType.DATA_NODE);
     storageID = "";
   }
   
-  DataStorage( int nsID, long cT, String strgID ) {
-    super( NodeType.DATA_NODE, nsID, cT );
+  DataStorage(int nsID, long cT, String strgID) {
+    super(NodeType.DATA_NODE, nsID, cT);
     this.storageID = strgID;
   }
   
-  DataStorage( StorageInfo storageInfo, String strgID ) {
-    super( NodeType.DATA_NODE, storageInfo );
+  DataStorage(StorageInfo storageInfo, String strgID) {
+    super(NodeType.DATA_NODE, storageInfo);
     this.storageID = strgID;
   }
 
@@ -47,7 +47,7 @@ class DataStorage extends Storage {
     return storageID;
   }
   
-  void setStorageID( String newStorageID ) {
+  void setStorageID(String newStorageID) {
     this.storageID = newStorageID;
   }
   
@@ -62,10 +62,10 @@ class DataStorage extends Storage {
    * @param startOpt startup option
    * @throws IOException
    */
-  void recoverTransitionRead( NamespaceInfo nsInfo,
-                              Collection<File> dataDirs,
-                              StartupOption startOpt
-                              ) throws IOException {
+  void recoverTransitionRead(NamespaceInfo nsInfo,
+                             Collection<File> dataDirs,
+                             StartupOption startOpt
+                             ) throws IOException {
     assert FSConstants.LAYOUT_VERSION == nsInfo.getLayoutVersion() :
       "Data-node and name-node layout versions must be the same.";
     
@@ -73,53 +73,53 @@ class DataStorage extends Storage {
     // check whether all is consistent before transitioning.
     // Format and recover.
     this.storageID = "";
-    this.storageDirs = new ArrayList<StorageDirectory>( dataDirs.size() );
-    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>( dataDirs.size() );
-    for( Iterator<File> it = dataDirs.iterator(); it.hasNext(); ) {
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
+    ArrayList<StorageState> dataDirStates = new ArrayList<StorageState>(dataDirs.size());
+    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
-      StorageDirectory sd = new StorageDirectory( dataDir );
+      StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       try {
-        curState = sd.analyzeStorage( startOpt );
+        curState = sd.analyzeStorage(startOpt);
         // sd is locked but not opened
-        switch( curState ) {
+        switch(curState) {
         case NORMAL:
           break;
         case NON_EXISTENT:
           // ignore this storage
-          LOG.info( "Storage directory " + dataDir + " does not exist." );
+          LOG.info("Storage directory " + dataDir + " does not exist.");
           it.remove();
           continue;
         case CONVERT:
-          convertLayout( sd, nsInfo );
+          convertLayout(sd, nsInfo);
           break;
         case NOT_FORMATTED: // format
-          LOG.info( "Storage directory " + dataDir + " is not formatted." );
-          LOG.info( "Formatting ..." );
-          format( sd, nsInfo );
+          LOG.info("Storage directory " + dataDir + " is not formatted.");
+          LOG.info("Formatting ...");
+          format(sd, nsInfo);
           break;
         default:  // recovery part is common
-          sd.doRecover( curState );
+          sd.doRecover(curState);
         }
       } catch (IOException ioe) {
         sd.unlock();
         throw ioe;
       }
       // add to the storage list
-      addStorageDir( sd );
-      dataDirStates.add( curState );
+      addStorageDir(sd);
+      dataDirStates.add(curState);
     }
 
-    if( dataDirs.size() == 0 )  // none of the data dirs exist
-      throw new IOException( 
-                            "All specified directories are not accessible or do not exist." );
+    if (dataDirs.size() == 0)  // none of the data dirs exist
+      throw new IOException(
+                            "All specified directories are not accessible or do not exist.");
 
     // 2. Do transitions
     // Each storage directory is treated individually.
    // During startup some of them can upgrade or roll back 
    // while others could be up to date for the regular startup.
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      doTransition( getStorageDir( idx ), nsInfo, startOpt );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      doTransition(getStorageDir(idx), nsInfo, startOpt);
       assert this.getLayoutVersion() == nsInfo.getLayoutVersion() :
         "Data-node and name-node layout versions must be the same.";
       assert this.getCTime() == nsInfo.getCTime() :
@@ -130,7 +130,7 @@ class DataStorage extends Storage {
     this.writeAll();
   }
 
-  void format( StorageDirectory sd, NamespaceInfo nsInfo ) throws IOException {
+  void format(StorageDirectory sd, NamespaceInfo nsInfo) throws IOException {
     sd.clearDirectory(); // create directory
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.namespaceID = nsInfo.getNamespaceID();
@@ -139,42 +139,42 @@ class DataStorage extends Storage {
     sd.write();
   }
 
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.setFields( props, sd );
-    props.setProperty( "storageID", storageID );
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.setFields(props, sd);
+    props.setProperty("storageID", storageID);
   }
 
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.getFields( props, sd );
-    String ssid = props.getProperty( "storageID" );
-    if( ssid == null ||
-        ! ("".equals( storageID ) || "".equals( ssid ) ||
-           storageID.equals( ssid )))
-      throw new InconsistentFSStateException( sd.root,
-                                              "has incompatible storage Id." );
-    if( "".equals( storageID ) ) // update id only if it was empty
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.getFields(props, sd);
+    String ssid = props.getProperty("storageID");
+    if (ssid == null ||
+        !("".equals(storageID) || "".equals(ssid) ||
+          storageID.equals(ssid)))
+      throw new InconsistentFSStateException(sd.root,
+                                             "has incompatible storage Id.");
+    if ("".equals(storageID)) // update id only if it was empty
       storageID = ssid;
   }
 
-  boolean isConversionNeeded( StorageDirectory sd ) throws IOException {
-    File oldF = new File( sd.root, "storage" );
-    if( ! oldF.exists() )
+  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+    File oldF = new File(sd.root, "storage");
+    if (!oldF.exists())
       return false;
     // check consistency of the old storage
-    File oldDataDir = new File( sd.root, "data" );
-    if( ! oldDataDir.exists() ) 
-      throw new InconsistentFSStateException( sd.root,
-                                              "Old layout block directory " + oldDataDir + " is missing" ); 
-    if( ! oldDataDir.isDirectory() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldDataDir + " is not a directory." );
-    if( ! oldDataDir.canWrite() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldDataDir + " is not writable." );
+    File oldDataDir = new File(sd.root, "data");
+    if (!oldDataDir.exists()) 
+      throw new InconsistentFSStateException(sd.root,
+                                             "Old layout block directory " + oldDataDir + " is missing"); 
+    if (!oldDataDir.isDirectory())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldDataDir + " is not a directory.");
+    if (!oldDataDir.canWrite())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldDataDir + " is not writable.");
     return true;
   }
   
@@ -185,44 +185,44 @@ class DataStorage extends Storage {
    * @param nsInfo namespace information
    * @throws IOException
    */
-  private void convertLayout( StorageDirectory sd,
-                              NamespaceInfo nsInfo 
-                              ) throws IOException {
+  private void convertLayout(StorageDirectory sd,
+                             NamespaceInfo nsInfo 
+                             ) throws IOException {
     assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
       "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
-    File oldF = new File( sd.root, "storage" );
-    File oldDataDir = new File( sd.root, "data" );
+    File oldF = new File(sd.root, "storage");
+    File oldDataDir = new File(sd.root, "data");
     assert oldF.exists() : "Old datanode layout \"storage\" file is missing";
     assert oldDataDir.exists() : "Old layout block directory \"data\" is missing";
-    LOG.info( "Old layout version file " + oldF
-              + " is found. New layout version is "
-              + FSConstants.LAYOUT_VERSION );
-    LOG.info( "Converting ..." );
+    LOG.info("Old layout version file " + oldF
+             + " is found. New layout version is "
+             + FSConstants.LAYOUT_VERSION);
+    LOG.info("Converting ...");
     
     // Lock and Read old storage file
-    RandomAccessFile oldFile = new RandomAccessFile( oldF, "rws" );
+    RandomAccessFile oldFile = new RandomAccessFile(oldF, "rws");
     if (oldFile == null)
-      throw new IOException( "Cannot read file: " + oldF );
+      throw new IOException("Cannot read file: " + oldF);
     FileLock oldLock = oldFile.getChannel().tryLock();
     if (oldLock == null)
-      throw new IOException( "Cannot lock file: " + oldF );
+      throw new IOException("Cannot lock file: " + oldF);
     try {
       oldFile.seek(0);
       int odlVersion = oldFile.readInt();
-      if( odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION )
-        throw new IncorrectVersionException( odlVersion, "file " + oldF,
-                                             LAST_PRE_UPGRADE_LAYOUT_VERSION );
-      String odlStorageID = org.apache.hadoop.io.UTF8.readString( oldFile );
+      if (odlVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION)
+        throw new IncorrectVersionException(odlVersion, "file " + oldF,
+                                            LAST_PRE_UPGRADE_LAYOUT_VERSION);
+      String odlStorageID = org.apache.hadoop.io.UTF8.readString(oldFile);
   
       // check new storage
       File newDataDir = sd.getCurrentDir();
       File versionF = sd.getVersionFile();
-      if( versionF.exists() )
-        throw new IOException( "Version file already exists: " + versionF );
-      if( newDataDir.exists() ) // somebody created current dir manually
-        deleteDir( newDataDir );
+      if (versionF.exists())
+        throw new IOException("Version file already exists: " + versionF);
+      if (newDataDir.exists()) // somebody created current dir manually
+        deleteDir(newDataDir);
       // Write new layout
-      rename( oldDataDir, newDataDir );
+      rename(oldDataDir, newDataDir);
   
       this.layoutVersion = FSConstants.LAYOUT_VERSION;
       this.namespaceID = nsInfo.getNamespaceID();
@@ -235,8 +235,8 @@ class DataStorage extends Storage {
       oldFile.close();
     }
     // move old storage file into current dir
-    rename( oldF, new File( sd.getCurrentDir(), "storage" ));
-    LOG.info( "Conversion of " + oldF + " is complete." );
+    rename(oldF, new File(sd.getCurrentDir(), "storage"));
+    LOG.info("Conversion of " + oldF + " is complete.");
   }
 
   /**
@@ -252,26 +252,26 @@ class DataStorage extends Storage {
    * @param startOpt  startup option
    * @throws IOException
    */
-  private void doTransition(  StorageDirectory sd, 
-                              NamespaceInfo nsInfo, 
-                              StartupOption startOpt
-                              ) throws IOException {
-    if( startOpt == StartupOption.ROLLBACK )
-      doRollback( sd, nsInfo ); // rollback if applicable
+  private void doTransition( StorageDirectory sd, 
+                             NamespaceInfo nsInfo, 
+                             StartupOption startOpt
+                             ) throws IOException {
+    if (startOpt == StartupOption.ROLLBACK)
+      doRollback(sd, nsInfo); // rollback if applicable
     sd.read();
     assert this.layoutVersion >= FSConstants.LAYOUT_VERSION :
       "Future version is not allowed";
-    if( getNamespaceID() != nsInfo.getNamespaceID() )
-      throw new IOException( 
+    if (getNamespaceID() != nsInfo.getNamespaceID())
+      throw new IOException(
                             "Incompatible namespaceIDs in " + sd.root.getCanonicalPath()
                             + ": namenode namespaceID = " + nsInfo.getNamespaceID() 
-                            + "; datanode namespaceID = " + getNamespaceID() );
-    if( this.layoutVersion == FSConstants.LAYOUT_VERSION 
-        && this.cTime == nsInfo.getCTime() )
+                            + "; datanode namespaceID = " + getNamespaceID());
+    if (this.layoutVersion == FSConstants.LAYOUT_VERSION 
+        && this.cTime == nsInfo.getCTime())
       return; // regular startup
-    if( this.layoutVersion > FSConstants.LAYOUT_VERSION
-        || this.cTime < nsInfo.getCTime() ) {
-      doUpgrade( sd, nsInfo );  // upgrade
+    if (this.layoutVersion > FSConstants.LAYOUT_VERSION
+        || this.cTime < nsInfo.getCTime()) {
+      doUpgrade(sd, nsInfo);  // upgrade
       return;
     }
     // layoutVersion == LAYOUT_VERSION && this.cTime > nsInfo.cTime
@@ -280,7 +280,7 @@ class DataStorage extends Storage {
                           + " CTime = " + this.getCTime() 
                           + " is newer than the namespace state: LV = "
                           + nsInfo.getLayoutVersion() 
-                          + " CTime = " + nsInfo.getCTime() );
+                          + " CTime = " + nsInfo.getCTime());
   }
 
   /**
@@ -290,26 +290,26 @@ class DataStorage extends Storage {
    * @param sd  storage directory
    * @throws IOException
    */
-  void doUpgrade( StorageDirectory sd,
-                  NamespaceInfo nsInfo
-                  ) throws IOException {
-    LOG.info( "Upgrading storage directory " + sd.root 
-              + ".\n   old LV = " + this.getLayoutVersion()
-              + "; old CTime = " + this.getCTime()
-              + ".\n   new LV = " + nsInfo.getLayoutVersion()
-              + "; new CTime = " + nsInfo.getCTime() );
+  void doUpgrade(StorageDirectory sd,
+                 NamespaceInfo nsInfo
+                 ) throws IOException {
+    LOG.info("Upgrading storage directory " + sd.root 
+             + ".\n   old LV = " + this.getLayoutVersion()
+             + "; old CTime = " + this.getCTime()
+             + ".\n   new LV = " + nsInfo.getLayoutVersion()
+             + "; new CTime = " + nsInfo.getCTime());
     File curDir = sd.getCurrentDir();
     File prevDir = sd.getPreviousDir();
     assert curDir.exists() : "Current directory must exist.";
     // delete previous dir before upgrading
-    if( prevDir.exists() )
-      deleteDir( prevDir );
+    if (prevDir.exists())
+      deleteDir(prevDir);
     File tmpDir = sd.getPreviousTmp();
-    assert ! tmpDir.exists() : "previous.tmp directory must not exist.";
+    assert !tmpDir.exists() : "previous.tmp directory must not exist.";
     // rename current to tmp
-    rename( curDir, tmpDir );
+    rename(curDir, tmpDir);
     // hardlink blocks
-    linkBlocks( tmpDir, curDir );
+    linkBlocks(tmpDir, curDir);
     // write version file
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     assert this.namespaceID == nsInfo.getNamespaceID() :
@@ -317,69 +317,69 @@ class DataStorage extends Storage {
     this.cTime = nsInfo.getCTime();
     sd.write();
     // rename tmp to previous
-    rename( tmpDir, prevDir );
-    LOG.info( "Upgrade of " + sd.root + " is complete." );
+    rename(tmpDir, prevDir);
+    LOG.info("Upgrade of " + sd.root + " is complete.");
   }
 
-  void doRollback(  StorageDirectory sd,
-                    NamespaceInfo nsInfo
-                    ) throws IOException {
+  void doRollback( StorageDirectory sd,
+                   NamespaceInfo nsInfo
+                   ) throws IOException {
     File prevDir = sd.getPreviousDir();
     // regular startup if previous dir does not exist
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
       return;
     DataStorage prevInfo = new DataStorage();
-    StorageDirectory prevSD = prevInfo.new StorageDirectory( sd.root );
-    prevSD.read( prevSD.getPreviousVersionFile() );
+    StorageDirectory prevSD = prevInfo.new StorageDirectory(sd.root);
+    prevSD.read(prevSD.getPreviousVersionFile());
 
    // We allow rollback to a state that is either consistent with
     // the namespace state or can be further upgraded to it.
-    if( ! ( prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
-            && prevInfo.getCTime() <= nsInfo.getCTime() ))  // cannot rollback
-      throw new InconsistentFSStateException( prevSD.root,
-                                              "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
-                                              + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
-                                              + " is newer than the namespace state: LV = "
-                                              + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime() );
-    LOG.info( "Rolling back storage directory " + sd.root 
-              + ".\n   target LV = " + nsInfo.getLayoutVersion()
-              + "; target CTime = " + nsInfo.getCTime() );
+    if (!(prevInfo.getLayoutVersion() >= FSConstants.LAYOUT_VERSION
+          && prevInfo.getCTime() <= nsInfo.getCTime()))  // cannot rollback
+      throw new InconsistentFSStateException(prevSD.root,
+                                             "Cannot rollback to a newer state.\nDatanode previous state: LV = " 
+                                             + prevInfo.getLayoutVersion() + " CTime = " + prevInfo.getCTime() 
+                                             + " is newer than the namespace state: LV = "
+                                             + nsInfo.getLayoutVersion() + " CTime = " + nsInfo.getCTime());
+    LOG.info("Rolling back storage directory " + sd.root 
+             + ".\n   target LV = " + nsInfo.getLayoutVersion()
+             + "; target CTime = " + nsInfo.getCTime());
     File tmpDir = sd.getRemovedTmp();
-    assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
+    assert !tmpDir.exists() : "removed.tmp directory must not exist.";
     // rename current to tmp
     File curDir = sd.getCurrentDir();
     assert curDir.exists() : "Current directory must exist.";
-    rename( curDir, tmpDir );
+    rename(curDir, tmpDir);
     // rename previous to current
-    rename( prevDir, curDir );
+    rename(prevDir, curDir);
     // delete tmp dir
-    deleteDir( tmpDir );
-    LOG.info( "Rollback of " + sd.root + " is complete." );
+    deleteDir(tmpDir);
+    LOG.info("Rollback of " + sd.root + " is complete.");
   }
 
-  void doFinalize( StorageDirectory sd ) throws IOException {
+  void doFinalize(StorageDirectory sd) throws IOException {
     File prevDir = sd.getPreviousDir();
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
       return; // already discarded
     final String dataDirPath = sd.root.getCanonicalPath();
-    LOG.info( "Finalizing upgrade for storage directory " 
-              + dataDirPath 
-              + ".\n   cur LV = " + this.getLayoutVersion()
-              + "; cur CTime = " + this.getCTime() );
+    LOG.info("Finalizing upgrade for storage directory " 
+             + dataDirPath 
+             + ".\n   cur LV = " + this.getLayoutVersion()
+             + "; cur CTime = " + this.getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();
     // rename previous to tmp
-    rename( prevDir, tmpDir );
+    rename(prevDir, tmpDir);
 
     // delete tmp dir in a separate thread
-    new Daemon( new Runnable() {
+    new Daemon(new Runnable() {
         public void run() {
           try {
-            deleteDir( tmpDir );
-          } catch( IOException ex ) {
-            LOG.error( "Finalize upgrade for " + dataDirPath + " failed.", ex );
+            deleteDir(tmpDir);
+          } catch(IOException ex) {
+            LOG.error("Finalize upgrade for " + dataDirPath + " failed.", ex);
           }
-          LOG.info( "Finalize upgrade for " + dataDirPath + " is complete." );
+          LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
         }
         public String toString() { return "Finalize " + dataDirPath; }
       }).start();
@@ -387,26 +387,26 @@ class DataStorage extends Storage {
   
   void finalizeUpgrade() throws IOException {
     for (Iterator<StorageDirectory> it = storageDirs.iterator(); it.hasNext();) {
-      doFinalize( it.next() );
+      doFinalize(it.next());
     }
   }
   
-  static void linkBlocks( File from, File to ) throws IOException {
-    if( ! from.isDirectory() ) {
-      HardLink.createHardLink( from, to );
+  static void linkBlocks(File from, File to) throws IOException {
+    if (!from.isDirectory()) {
+      HardLink.createHardLink(from, to);
       return;
     }
     // from is a directory
-    if( ! to.mkdir() )
-      throw new IOException("Cannot create directory " + to );
-    String[] blockNames = from.list( new java.io.FilenameFilter() {
+    if (!to.mkdir())
+      throw new IOException("Cannot create directory " + to);
+    String[] blockNames = from.list(new java.io.FilenameFilter() {
         public boolean accept(File dir, String name) {
-          return name.startsWith( BLOCK_SUBDIR_PREFIX ) 
-            || name.startsWith( BLOCK_FILE_PREFIX );
+          return name.startsWith(BLOCK_SUBDIR_PREFIX) 
+            || name.startsWith(BLOCK_FILE_PREFIX);
         }
       });
     
-    for( int i = 0; i < blockNames.length; i++ )
-      linkBlocks( new File(from, blockNames[i]), new File(to, blockNames[i]) );
+    for(int i = 0; i < blockNames.length; i++)
+      linkBlocks(new File(from, blockNames[i]), new File(to, blockNames[i]));
   }
 }
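
linkBlocks() above walks the old block tree recursively and hard-links files instead of copying them, so the upgraded layout shares one physical copy of every block with the old one. A hedged sketch of the same walk, using java.nio.file in place of the project's HardLink helper:

    import java.io.File;
    import java.io.IOException;
    import java.nio.file.Files;

    class LinkTreeSketch {
      // Mirror 'from' under 'to', hard-linking every regular file found.
      static void linkTree(File from, File to) throws IOException {
        if (!from.isDirectory()) {
          Files.createLink(to.toPath(), from.toPath()); // link, not copy
          return;
        }
        if (!to.mkdir())
          throw new IOException("Cannot create directory " + to);
        String[] names = from.list();
        if (names == null)
          throw new IOException("Cannot list directory " + from);
        for (String name : names)
          linkTree(new File(from, name), new File(to, name));
      }
    }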

+ 23 - 23
src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java

@@ -60,8 +60,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /** DatanodeDescriptor constructor
    * @param nodeID id of the data node
    */
-  public DatanodeDescriptor( DatanodeID nodeID ) {
-    this( nodeID, 0L, 0L, 0 );
+  public DatanodeDescriptor(DatanodeID nodeID) {
+    this(nodeID, 0L, 0L, 0);
   }
 
   /** DatanodeDescriptor constructor
@@ -69,9 +69,9 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param nodeID id of the data node
    * @param networkLocation location of the data node in network
    */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             String networkLocation ) {
-    this( nodeID, networkLocation, null );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            String networkLocation) {
+    this(nodeID, networkLocation, null);
   }
   
   /** DatanodeDescriptor constructor
@@ -80,10 +80,10 @@ public class DatanodeDescriptor extends DatanodeInfo {
    * @param networkLocation location of the data node in network
    * @param hostName it could be different from host specified for DatanodeID
    */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             String networkLocation,
-                             String hostName ) {
-    this( nodeID, networkLocation, hostName, 0L, 0L, 0 );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            String networkLocation,
+                            String hostName) {
+    this(nodeID, networkLocation, hostName, 0L, 0L, 0);
   }
   
   /** DatanodeDescriptor constructor
@@ -93,11 +93,11 @@ public class DatanodeDescriptor extends DatanodeInfo {
   * @param remaining remaining capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
-  public DatanodeDescriptor( DatanodeID nodeID, 
-                             long capacity, 
-                             long remaining,
-                             int xceiverCount ) {
-    super( nodeID );
+  public DatanodeDescriptor(DatanodeID nodeID, 
+                            long capacity, 
+                            long remaining,
+                            int xceiverCount) {
+    super(nodeID);
     updateHeartbeat(capacity, remaining, xceiverCount);
     initWorkLists();
   }
@@ -110,14 +110,14 @@ public class DatanodeDescriptor extends DatanodeInfo {
   * @param remaining remaining capacity of the data node
    * @param xceiverCount # of data transfers at the data node
    */
-  public DatanodeDescriptor( DatanodeID nodeID,
-                             String networkLocation,
-                             String hostName,
-                             long capacity, 
-                             long remaining,
-                             int xceiverCount ) {
-    super( nodeID, networkLocation, hostName );
-    updateHeartbeat( capacity, remaining, xceiverCount);
+  public DatanodeDescriptor(DatanodeID nodeID,
+                            String networkLocation,
+                            String hostName,
+                            long capacity, 
+                            long remaining,
+                            int xceiverCount) {
+    super(nodeID, networkLocation, hostName);
+    updateHeartbeat(capacity, remaining, xceiverCount);
     initWorkLists();
   }
 
@@ -169,7 +169,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   }
   
   Block getBlock(long blockId) {
-    return blocks.get( new Block(blockId, 0) );
+    return blocks.get(new Block(blockId, 0));
   }
   
   Block getBlock(Block b) {
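
The DatanodeDescriptor constructors above form a telescoping chain: each shorter form delegates to a fuller one with default values, so every field is initialized in a single place. A stripped-down sketch of the pattern; ToyDescriptor and its fields are hypothetical:

    class ToyDescriptor {
      private final String name;
      private final long capacity;
      private final int xceiverCount;

      ToyDescriptor(String name) {
        this(name, 0L, 0);              // defaults, as in the hunks above
      }

      ToyDescriptor(String name, long capacity, int xceiverCount) {
        this.name = name;               // one place initializes every field
        this.capacity = capacity;
        this.xceiverCount = xceiverCount;
      }
    }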

+ 7 - 7
src/java/org/apache/hadoop/dfs/DatanodeID.java

@@ -24,7 +24,7 @@ public class DatanodeID implements WritableComparable {
    * DatanodeID default constructor
    */
   public DatanodeID() {
-    this( new String(), new String(), -1 );
+    this(new String(), new String(), -1);
   }
 
   /**
@@ -32,8 +32,8 @@ public class DatanodeID implements WritableComparable {
    * 
    * @param from
    */
-  public DatanodeID( DatanodeID from ) {
-    this( from.getName(), from.getStorageID(), from.getInfoPort() );
+  public DatanodeID(DatanodeID from) {
+    this(from.getName(), from.getStorageID(), from.getInfoPort());
   }
   
   /**
@@ -42,7 +42,7 @@ public class DatanodeID implements WritableComparable {
    * @param nodeName (hostname:portNumber) 
    * @param storageID data storage ID
    */
-  public DatanodeID( String nodeName, String storageID, int infoPort ) {
+  public DatanodeID(String nodeName, String storageID, int infoPort) {
     this.name = nodeName;
     this.storageID = storageID;
     this.infoPort = infoPort;
@@ -90,13 +90,13 @@ public class DatanodeID implements WritableComparable {
   
   public int getPort() {
     int colon = name.indexOf(":");
-    if ( colon < 0 ) {
+    if (colon < 0) {
       return 50010; // default port.
     }
     return Integer.parseInt(name.substring(colon+1));
   }
 
-  public boolean equals( Object to ) {
+  public boolean equals(Object to) {
     return (name.equals(((DatanodeID)to).getName()) &&
             storageID.equals(((DatanodeID)to).getStorageID()));
   }
@@ -113,7 +113,7 @@ public class DatanodeID implements WritableComparable {
    * Update fields when a new registration request comes in.
    * Note that this does not update storageID.
    */
-  void updateRegInfo( DatanodeID nodeReg ) {
+  void updateRegInfo(DatanodeID nodeReg) {
     name = nodeReg.getName();
     infoPort = nodeReg.getInfoPort();
     // update any more fields added in future.
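
getPort() above pulls the trailing ":port" out of the node name and falls back to 50010 when none is present. A tiny runnable demo of that parsing; the host names are made up:

    public class PortParseDemo {
      static int portOf(String name) {
        int colon = name.indexOf(':');
        if (colon < 0) {
          return 50010;                 // default datanode port
        }
        return Integer.parseInt(name.substring(colon + 1));
      }

      public static void main(String[] args) {
        System.out.println(portOf("dn1.example.com"));       // 50010
        System.out.println(portOf("dn1.example.com:50011")); // 50011
      }
    }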

+ 16 - 16
src/java/org/apache/hadoop/dfs/DatanodeInfo.java

@@ -63,8 +63,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
     adminState = null;
   }
   
-  DatanodeInfo( DatanodeInfo from ) {
-    super( from );
+  DatanodeInfo(DatanodeInfo from) {
+    super(from);
     this.capacity = from.getCapacity();
     this.remaining = from.getRemaining();
     this.lastUpdate = from.getLastUpdate();
@@ -74,8 +74,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.hostName = from.hostName;
   }
 
-  DatanodeInfo( DatanodeID nodeID ) {
-    super( nodeID );
+  DatanodeInfo(DatanodeID nodeID) {
+    super(nodeID);
     this.capacity = 0L;
     this.remaining = 0L;
     this.lastUpdate = 0L;
@@ -83,7 +83,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.adminState = null;    
   }
   
-  DatanodeInfo( DatanodeID nodeID, String location, String hostName ) {
+  DatanodeInfo(DatanodeID nodeID, String location, String hostName) {
     this(nodeID);
     this.location = location;
     this.hostName = hostName;
@@ -135,10 +135,10 @@ public class DatanodeInfo extends DatanodeID implements Node {
 
   
   public String getHostName() {
-    return ( hostName == null || hostName.length()==0 ) ? getHost() : hostName;
+    return (hostName == null || hostName.length()==0) ? getHost() : hostName;
   }
   
-  public void setHostName( String host ) {
+  public void setHostName(String host) {
     hostName = host;
   }
   
@@ -149,7 +149,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long r = getRemaining();
     long u = c - r;
     buffer.append("Name: "+name+"\n");
-    if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
+    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append("Rack: "+location+"\n");
     }
     if (isDecommissioned()) {
@@ -161,7 +161,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     buffer.append("Total raw bytes: "+c+" ("+FsShell.byteDesc(c)+")"+"\n");
     buffer.append("Used raw bytes: "+u+" ("+FsShell.byteDesc(u)+")"+"\n");
-    buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100,2)+"%"+"\n");
+    buffer.append("% used: "+FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%"+"\n");
     buffer.append("Last contact: "+new Date(lastUpdate)+"\n");
     return buffer.toString();
   }
@@ -173,7 +173,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     long r = getRemaining();
     long u = c - r;
     buffer.append(name);
-    if(!NetworkTopology.DEFAULT_RACK.equals(location)) {
+    if (!NetworkTopology.DEFAULT_RACK.equals(location)) {
       buffer.append(" "+location);
     }
     if (isDecommissioned()) {
@@ -185,7 +185,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
     buffer.append(" " + c + "(" + FsShell.byteDesc(c)+")");
     buffer.append(" " + u + "(" + FsShell.byteDesc(u)+")");
-    buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100,2)+"%");
+    buffer.append(" " + FsShell.limitDecimal(((1.0*u)/c)*100, 2)+"%");
     buffer.append(" " + new Date(lastUpdate));
     return buffer.toString();
   }
@@ -260,13 +260,13 @@ public class DatanodeInfo extends DatanodeID implements Node {
 
   /** Return this node's parent */
   public Node getParent() { return parent; }
-  public void setParent( Node parent ) {this.parent = parent;}
+  public void setParent(Node parent) {this.parent = parent;}
    
   /** Return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
   public int getLevel() { return level; }
-  public void setLevel( int level) {this.level = level;}
+  public void setLevel(int level) {this.level = level;}
 
   /////////////////////////////////////////////////
   // Writable
@@ -282,12 +282,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
   /**
    */
   public void write(DataOutput out) throws IOException {
-    super.write( out );
+    super.write(out);
     out.writeLong(capacity);
     out.writeLong(remaining);
     out.writeLong(lastUpdate);
     out.writeInt(xceiverCount);
-    Text.writeString( out, location );
+    Text.writeString(out, location);
     WritableUtils.writeEnum(out, getAdminState());
   }
 
@@ -299,7 +299,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
     this.remaining = in.readLong();
     this.lastUpdate = in.readLong();
     this.xceiverCount = in.readInt();
-    this.location = Text.readString( in );
+    this.location = Text.readString(in);
     AdminStates newState = (AdminStates) WritableUtils.readEnum(in,
                                                                 AdminStates.class);
     setAdminState(newState);
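
write() and readFields() above must remain mirror images: fields are read back in exactly the order they were written, superclass portion first. A dependency-free sketch of that symmetry; ToyInfo is hypothetical and writeUTF stands in for Text.writeString:

    import java.io.*;

    class ToyInfo {
      long capacity, remaining;
      String location;

      void write(DataOutput out) throws IOException {
        out.writeLong(capacity);
        out.writeLong(remaining);
        out.writeUTF(location);   // stand-in for Text.writeString
      }

      void readFields(DataInput in) throws IOException {
        capacity = in.readLong(); // same order as write()
        remaining = in.readLong();
        location = in.readUTF();
      }
    }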

+ 9 - 9
src/java/org/apache/hadoop/dfs/DatanodeProtocol.java

@@ -64,9 +64,9 @@ interface DatanodeProtocol extends VersionedProtocol {
    * new storageID if the datanode did not have one and
    * registration ID for further communication.
    */
-  public DatanodeRegistration register( DatanodeRegistration registration,
-                                        String networkLocation
-                                        ) throws IOException;
+  public DatanodeRegistration register(DatanodeRegistration registration,
+                                       String networkLocation
+                                       ) throws IOException;
   /**
    * sendHeartbeat() tells the NameNode that the DataNode is still
    * alive and well.  Includes some status info, too. 
@@ -74,10 +74,10 @@ interface DatanodeProtocol extends VersionedProtocol {
    * A DatanodeCommand tells the DataNode to invalidate local block(s), 
    * or to copy them to other DataNodes, etc.
    */
-  public DatanodeCommand sendHeartbeat( DatanodeRegistration registration,
-                                        long capacity, long remaining,
-                                        int xmitsInProgress,
-                                        int xceiverCount) throws IOException;
+  public DatanodeCommand sendHeartbeat(DatanodeRegistration registration,
+                                       long capacity, long remaining,
+                                       int xmitsInProgress,
+                                       int xceiverCount) throws IOException;
 
   /**
    * blockReport() tells the NameNode about all the locally-stored blocks.
@@ -86,8 +86,8 @@ interface DatanodeProtocol extends VersionedProtocol {
    * the locally-stored blocks.  It's invoked upon startup and then
    * infrequently afterwards.
    */
-  public DatanodeCommand blockReport( DatanodeRegistration registration,
-                                      Block blocks[]) throws IOException;
+  public DatanodeCommand blockReport(DatanodeRegistration registration,
+                                     Block blocks[]) throws IOException;
     
   /**
    * blockReceived() allows the DataNode to tell the NameNode about

+ 9 - 9
src/java/org/apache/hadoop/dfs/DatanodeRegistration.java

@@ -31,7 +31,7 @@ class DatanodeRegistration extends DatanodeID implements Writable {
    * Default constructor.
    */
   public DatanodeRegistration() {
-    super( null, null, -1 );
+    super(null, null, -1);
     this.storageInfo = new StorageInfo();
   }
   
@@ -40,9 +40,9 @@ class DatanodeRegistration extends DatanodeID implements Writable {
    */
   public DatanodeRegistration(String nodeName, 
                               int infoPort,
-                              DataStorage storage ) {
-    super( nodeName, storage.getStorageID(), infoPort );
-    this.storageInfo = new StorageInfo( storage );
+                              DataStorage storage) {
+    super(nodeName, storage.getStorageID(), infoPort);
+    this.storageInfo = new StorageInfo(storage);
   }
 
   /**
@@ -54,7 +54,7 @@ class DatanodeRegistration extends DatanodeID implements Writable {
   /**
    */
   public String getRegistrationID() {
-    return Storage.getRegistrationID( storageInfo );
+    return Storage.getRegistrationID(storageInfo);
   }
 
   /////////////////////////////////////////////////
@@ -63,10 +63,10 @@ class DatanodeRegistration extends DatanodeID implements Writable {
   /**
    */
   public void write(DataOutput out) throws IOException {
-    super.write( out );
-    out.writeInt( storageInfo.getLayoutVersion() );
-    out.writeInt( storageInfo.getNamespaceID() );
-    out.writeLong( storageInfo.getCTime() );
+    super.write(out);
+    out.writeInt(storageInfo.getLayoutVersion());
+    out.writeInt(storageInfo.getNamespaceID());
+    out.writeLong(storageInfo.getCTime());
   }
 
   /**

+ 2 - 2
src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java

@@ -12,7 +12,7 @@ import java.io.IOException;
  */
 class DisallowedDatanodeException extends IOException {
 
-  public DisallowedDatanodeException( DatanodeID nodeID ) {
-    super("Datanode denied communication with namenode: " + nodeID.getName() );
+  public DisallowedDatanodeException(DatanodeID nodeID) {
+    super("Datanode denied communication with namenode: " + nodeID.getName());
   }
 }

+ 16 - 16
src/java/org/apache/hadoop/dfs/DistributedFileSystem.java

@@ -64,7 +64,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
       setConf(conf);
       String host = uri.getHost();
       int port = uri.getPort();
-      this.dfs = new DFSClient(new InetSocketAddress(host,port), conf);
+      this.dfs = new DFSClient(new InetSocketAddress(host, port), conf);
       this.uri = URI.create("hdfs://"+host+":"+port);
       this.localFs = getNamed("file:///", conf);
     }
@@ -122,7 +122,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
     }
 
     public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-      if (! exists(f)) {
+      if (!exists(f)) {
         throw new FileNotFoundException(f.toString());
       }
 
@@ -132,7 +132,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
     public FSDataOutputStream create(Path f, boolean overwrite,
                                      int bufferSize, short replication, long blockSize,
                                      Progressable progress) throws IOException {
-      if (exists(f) && ! overwrite) {
+      if (exists(f) && !overwrite) {
         throw new IOException("File already exists:"+f);
       }
       Path parent = f.getParent();
@@ -146,9 +146,9 @@ public class DistributedFileSystem extends ChecksumFileSystem {
                                     bufferSize);
     }
     
-    public boolean setReplication( Path src, 
-                                   short replication
-                                   ) throws IOException {
+    public boolean setReplication(Path src, 
+                                  short replication
+                                  ) throws IOException {
       return dfs.setReplication(getPath(src), replication);
     }
     
@@ -223,7 +223,7 @@ public class DistributedFileSystem extends ChecksumFileSystem {
 
     /** @deprecated */ @Deprecated
       public void lock(Path f, boolean shared) throws IOException {
-      dfs.lock(getPath(f), ! shared);
+      dfs.lock(getPath(f), !shared);
     }
 
     /** @deprecated */ @Deprecated
@@ -232,13 +232,13 @@ public class DistributedFileSystem extends ChecksumFileSystem {
     }
 
     @Override
-      public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
       throws IOException {
       FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
     }
 
     @Override
-      public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
       throws IOException {
       FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
     }
@@ -290,9 +290,9 @@ public class DistributedFileSystem extends ChecksumFileSystem {
      *  
      * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
      */
-    public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+    public boolean setSafeMode(FSConstants.SafeModeAction action) 
       throws IOException {
-      return dfs.setSafeMode( action );
+      return dfs.setSafeMode(action);
     }
 
     /*
@@ -368,17 +368,17 @@ public class DistributedFileSystem extends ChecksumFileSystem {
   }
 
   public DistributedFileSystem() {
-    super( new RawDistributedFileSystem() );
+    super(new RawDistributedFileSystem());
   }
 
   /** @deprecated */
   public DistributedFileSystem(InetSocketAddress namenode,
                                Configuration conf) throws IOException {
-    super( new RawDistributedFileSystem(namenode, conf) );
+    super(new RawDistributedFileSystem(namenode, conf));
   }
 
   @Override
-    public long getContentLength(Path f) throws IOException {
+  public long getContentLength(Path f) throws IOException {
     return fs.getContentLength(f);
   }
 
@@ -404,9 +404,9 @@ public class DistributedFileSystem extends ChecksumFileSystem {
    *  
    * @see org.apache.hadoop.dfs.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
    */
-  public boolean setSafeMode( FSConstants.SafeModeAction action ) 
+  public boolean setSafeMode(FSConstants.SafeModeAction action) 
     throws IOException {
-    return ((RawDistributedFileSystem)fs).setSafeMode( action );
+    return ((RawDistributedFileSystem)fs).setSafeMode(action);
   }
 
   /*

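As a usage note, both setSafeMode wrappers above simply forward to the DFS client. A minimal sketch of how a caller might query safe mode through the public class; FileSystem.get(conf) is the standard entry point, while the SAFEMODE_GET constant on FSConstants.SafeModeAction is assumed here for illustration:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.dfs.DistributedFileSystem;
    import org.apache.hadoop.dfs.FSConstants;
    import org.apache.hadoop.fs.FileSystem;

    public class SafeModeCheck {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();     // reads fs.default.name
        FileSystem fs = FileSystem.get(conf);
        if (fs instanceof DistributedFileSystem) {
          DistributedFileSystem dfs = (DistributedFileSystem) fs;
          // SAFEMODE_GET is assumed to report state without changing it.
          boolean on = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
          System.out.println("Safe mode is " + (on ? "ON" : "OFF"));
        }
      }
    }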
+ 45 - 45
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -50,8 +50,8 @@ class FSDataset implements FSConstants {
       throws IOException {
       this.dir = dir;
       this.children = null;
-      if (! dir.exists()) {
-        if (! dir.mkdirs()) {
+      if (!dir.exists()) {
+        if (!dir.mkdirs()) {
           throw new IOException("Mkdirs failed to create " + 
                                 dir.toString());
         }
@@ -78,14 +78,14 @@ class FSDataset implements FSConstants {
       }
     }
         
-    public File addBlock( Block b, File src ) throws IOException {
+    public File addBlock(Block b, File src) throws IOException {
       //First try without creating subdirectories
-      File file = addBlock( b, src, false, false );          
-      return ( file != null ) ? file : addBlock( b, src, true, true );
+      File file = addBlock(b, src, false, false);          
+      return (file != null) ? file : addBlock(b, src, true, true);
     }
 
-    private File addBlock( Block b, File src, boolean createOk, 
-                           boolean resetIdx ) throws IOException {
+    private File addBlock(Block b, File src, boolean createOk, 
+                          boolean resetIdx) throws IOException {
       if (numBlocks < maxBlocksPerDir) {
         File dest = new File(dir, b.getBlockName());
         src.renameTo(dest);
@@ -93,17 +93,17 @@ class FSDataset implements FSConstants {
         return dest;
       }
             
-      if ( lastChildIdx < 0 && resetIdx ) {
+      if (lastChildIdx < 0 && resetIdx) {
         //reset so that all children will be checked
-        lastChildIdx = random.nextInt( children.length );              
+        lastChildIdx = random.nextInt(children.length);              
       }
             
-      if ( lastChildIdx >= 0 && children != null ) {
+      if (lastChildIdx >= 0 && children != null) {
         //Check if any child-tree has room for a block.
         for (int i=0; i < children.length; i++) {
-          int idx = ( lastChildIdx + i )%children.length;
-          File file = children[idx].addBlock( b, src, false, resetIdx );
-          if ( file != null ) {
+          int idx = (lastChildIdx + i)%children.length;
+          File file = children[idx].addBlock(b, src, false, resetIdx);
+          if (file != null) {
             lastChildIdx = idx;
             return file; 
           }
@@ -111,20 +111,20 @@ class FSDataset implements FSConstants {
         lastChildIdx = -1;
       }
             
-      if ( !createOk ) {
+      if (!createOk) {
         return null;
       }
             
-      if ( children == null || children.length == 0 ) {
+      if (children == null || children.length == 0) {
         children = new FSDir[maxBlocksPerDir];
         for (int idx = 0; idx < maxBlocksPerDir; idx++) {
-          children[idx] = new FSDir( new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx) );
+          children[idx] = new FSDir(new File(dir, DataStorage.BLOCK_SUBDIR_PREFIX+idx));
         }
       }
             
       //now pick a child randomly for creating a new set of subdirs.
-      lastChildIdx = random.nextInt( children.length );
-      return children[ lastChildIdx ].addBlock( b, src, true, false ); 
+      lastChildIdx = random.nextInt(children.length);
+      return children[lastChildIdx].addBlock(b, src, true, false); 
     }
 
     /**
@@ -194,13 +194,13 @@ class FSDataset implements FSConstants {
     void clearPath(File f) {
       String root = dir.getAbsolutePath();
       String dir = f.getAbsolutePath();
-      if ( dir.startsWith( root ) ) {
-        String[] dirNames = dir.substring( root.length() ).
-          split( File.separator + "subdir" );
-        if ( clearPath( f, dirNames, 1 ) )
+      if (dir.startsWith(root)) {
+        String[] dirNames = dir.substring(root.length()).
+          split(File.separator + "subdir");
+        if (clearPath(f, dirNames, 1))
           return;
       }
-      clearPath( f, null, -1 );
+      clearPath(f, null, -1);
     }
         
     /*
@@ -211,33 +211,33 @@ class FSDataset implements FSConstants {
      * children in common case. If directory structure changes 
      * in later versions, we need to revisit this.
      */
-    private boolean clearPath( File f, String[] dirNames, int idx ) {
-      if ( ( dirNames == null || idx == dirNames.length ) &&
-           dir.compareTo(f) == 0) {
+    private boolean clearPath(File f, String[] dirNames, int idx) {
+      if ((dirNames == null || idx == dirNames.length) &&
+          dir.compareTo(f) == 0) {
         numBlocks--;
         return true;
       }
           
-      if ( dirNames != null ) {
+      if (dirNames != null) {
         //guess the child index from the directory name
-        if ( idx > ( dirNames.length - 1 ) || children == null ) {
+        if (idx > (dirNames.length - 1) || children == null) {
           return false;
         }
         int childIdx; 
         try {
-          childIdx = Integer.parseInt( dirNames[idx] );
-        } catch ( NumberFormatException ignored ) {
+          childIdx = Integer.parseInt(dirNames[idx]);
+        } catch (NumberFormatException ignored) {
           // layout changed? we could print a warning.
           return false;
         }
-        return ( childIdx >= 0 && childIdx < children.length ) ?
-          children[childIdx].clearPath( f, dirNames, idx+1 ) : false;
+        return (childIdx >= 0 && childIdx < children.length) ?
+          children[childIdx].clearPath(f, dirNames, idx+1) : false;
       }
 
       //guesses failed. back to blind iteration.
-      if ( children != null ) {
+      if (children != null) {
         for(int i=0; i < children.length; i++) {
-          if ( children[i].clearPath( f, null, -1 ) ){
+          if (children[i].clearPath(f, null, -1)) {
             return true;
           }
         }
@@ -262,12 +262,12 @@ class FSDataset implements FSConstants {
     private long reserved;
     private double usableDiskPct = USABLE_DISK_PCT_DEFAULT;
     
-    FSVolume( File currentDir, Configuration conf) throws IOException {
+    FSVolume(File currentDir, Configuration conf) throws IOException {
       this.reserved = conf.getLong("dfs.datanode.du.reserved", 0);
       this.usableDiskPct = conf.getFloat("dfs.datanode.du.pct",
                                          (float) USABLE_DISK_PCT_DEFAULT);
       File parent = currentDir.getParentFile();
-      this.dataDir = new FSDir( currentDir );
+      this.dataDir = new FSDir(currentDir);
       this.tmpDir = new File(parent, "tmp");
       if (tmpDir.exists()) {
         FileUtil.fullyDelete(tmpDir);
@@ -288,7 +288,7 @@ class FSDataset implements FSConstants {
       long capacity = usage.getCapacity();
       long freespace = Math.round(usage.getAvailableSkipRefresh() -
                                   capacity * (1 - usableDiskPct) - reserved); 
-      return ( freespace > 0 ) ? freespace : 0;
+      return (freespace > 0) ? freespace : 0;
     }
       
     String getMount() throws IOException {
@@ -309,7 +309,7 @@ class FSDataset implements FSConstants {
                                 b + ".  File " + f + " should be creatable, but is already present.");
         }
       } catch (IOException ie) {
-        System.out.println("Exception!  " + ie);
+        System.out.println("Exception! " + ie);
         throw ie;
       }
       return f;
@@ -430,7 +430,7 @@ class FSDataset implements FSConstants {
   /**
    * An FSDataset has a directory where it loads its data files.
    */
-  public FSDataset( DataStorage storage, Configuration conf) throws IOException {
+  public FSDataset(DataStorage storage, Configuration conf) throws IOException {
     this.maxBlocksPerDir = conf.getInt("dfs.datanode.numblocks", 64);
     FSVolume[] volArray = new FSVolume[storage.getNumStorageDirs()];
     for (int idx = 0; idx < storage.getNumStorageDirs(); idx++) {
@@ -461,7 +461,7 @@ class FSDataset implements FSConstants {
    * Find the block's on-disk length
    */
   public long getLength(Block b) throws IOException {
-    if (! isValidBlock(b)) {
+    if (!isValidBlock(b)) {
       throw new IOException("Block " + b + " is not valid.");
     }
     File f = getFile(b);
@@ -472,7 +472,7 @@ class FSDataset implements FSConstants {
    * Get a stream of data from the indicated block.
    */
   public synchronized InputStream getBlockData(Block b) throws IOException {
-    if (! isValidBlock(b)) {
+    if (!isValidBlock(b)) {
       throw new IOException("Block " + b + " is not valid.");
     }
     // File should be opened with the lock.
@@ -495,7 +495,7 @@ class FSDataset implements FSConstants {
     // Serialize access to /tmp, and check if file already there.
     //
     File f = null;
-    synchronized ( this ) {
+    synchronized (this) {
       //
       // Is it already in the create process?
       //
@@ -514,7 +514,7 @@ class FSDataset implements FSConstants {
         }
       }
       FSVolume v = null;
-      synchronized ( volumes ) {
+      synchronized (volumes) {
         v = volumes.getNextVolume(blockSize);
         // create temporary file to hold block in the designated volume
         f = v.createTmpFile(b);
@@ -544,7 +544,7 @@ class FSDataset implements FSConstants {
    */
   public synchronized void finalizeBlock(Block b) throws IOException {
     File f = ongoingCreates.get(b);
-    if (f == null || ! f.exists()) {
+    if (f == null || !f.exists()) {
       throw new IOException("No temporary file " + f + " for block " + b);
     }
     long finalLen = f.length();
@@ -552,7 +552,7 @@ class FSDataset implements FSConstants {
     FSVolume v = volumeMap.get(b);
         
     File dest = null;
-    synchronized ( volumes ) {
+    synchronized (volumes) {
       dest = v.addBlock(b, f);
     }
     blockMap.put(b, dest);

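The addBlock hunks above are easier to follow with the placement policy spelled out: a directory holds at most maxBlocksPerDir blocks, after which it fans out into "subdir" children and recurses. A simplified, self-contained sketch of that policy (round-robin child selection here, where the real FSDir remembers lastChildIdx and randomizes):

    import java.io.File;

    class MiniFSDir {
      private static final int MAX_BLOCKS_PER_DIR = 64; // dfs.datanode.numblocks default
      private final File dir;
      private MiniFSDir[] children;
      private int numBlocks;

      MiniFSDir(File dir) { this.dir = dir; dir.mkdirs(); }

      // Returns the file a new block should land in.
      File place(String blockName) {
        if (numBlocks < MAX_BLOCKS_PER_DIR) {
          numBlocks++;                               // room in this directory
          return new File(dir, blockName);
        }
        if (children == null) {                      // first overflow: create subdirs
          children = new MiniFSDir[MAX_BLOCKS_PER_DIR];
          for (int i = 0; i < children.length; i++) {
            children[i] = new MiniFSDir(new File(dir, "subdir" + i));
          }
        }
        return children[numBlocks++ % children.length].place(blockName);
      }
    }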
+ 66 - 66
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -114,12 +114,12 @@ class FSDirectory implements FSConstants {
      * @return Iterator of children
      */
     Iterator<INode> getChildIterator() {
-      return ( children != null ) ?  children.values().iterator() : null;
+      return (children != null) ?  children.values().iterator() : null;
       // instead of null, we could return a static empty iterator.
     }
         
     void addChild(String name, INode node) {
-      if ( children == null ) {
+      if (children == null) {
         children = new TreeMap<String, INode>();
       }
       children.put(name, node);
@@ -129,8 +129,8 @@ class FSDirectory implements FSConstants {
      * This is the external interface
      */
     INode getNode(String target) {
-      if ( target == null || 
-           ! target.startsWith("/") || target.length() == 0) {
+      if (target == null || 
+          !target.startsWith("/") || target.length() == 0) {
         return null;
       } else if (parent == null && "/".equals(target)) {
         return this;
@@ -152,7 +152,7 @@ class FSDirectory implements FSConstants {
     /**
      */
     INode getNode(Vector<String> components, int index) {
-      if (! name.equals(components.elementAt(index))) {
+      if (!name.equals(components.elementAt(index))) {
         return null;
       }
       if (index == components.size()-1) {
@@ -168,8 +168,8 @@ class FSDirectory implements FSConstants {
       }
     }
         
-    INode getChild( String name) {
-      return (children == null) ? null : children.get( name );
+    INode getChild(String name) {
+      return (children == null) ? null : children.get(name);
     }
 
     /**
@@ -183,7 +183,7 @@ class FSDirectory implements FSConstants {
      * @author shv
      */
     INode addNode(String path, INode newNode) throws FileNotFoundException {
-      File target = new File( path );
+      File target = new File(path);
       // find parent
       Path parent = new Path(path).getParent();
       if (parent == null) { // add root
@@ -200,7 +200,7 @@ class FSDirectory implements FSConstants {
       }
       // check whether the parent already has a node with that name
       String name = newNode.name = target.getName();
-      if( parentNode.getChild( name ) != null ) {
+      if (parentNode.getChild(name) != null) {
         return null;
       }
       // insert into the parent children list
@@ -233,7 +233,7 @@ class FSDirectory implements FSConstants {
       }
       incrDeletedFileCount();
       for (Iterator<INode> it = getChildIterator(); it != null &&
-             it.hasNext(); ) {
+             it.hasNext();) {
         it.next().collectSubtreeBlocks(v);
       }
     }
@@ -243,7 +243,7 @@ class FSDirectory implements FSConstants {
     int numItemsInTree() {
       int total = 0;
       for (Iterator<INode> it = getChildIterator(); it != null && 
-             it.hasNext(); ) {
+             it.hasNext();) {
         total += it.next().numItemsInTree();
       }
       return total + 1;
@@ -276,7 +276,7 @@ class FSDirectory implements FSConstants {
     long computeContentsLength() {
       long total = computeFileLength();
       for (Iterator<INode> it = getChildIterator(); it != null && 
-             it.hasNext(); ) {
+             it.hasNext();) {
         total += it.next().computeContentsLength();
       }
       return total;
@@ -302,7 +302,7 @@ class FSDirectory implements FSConstants {
       }
 
       for (Iterator<INode> it = getChildIterator(); it != null && 
-             it.hasNext(); ) {
+             it.hasNext();) {
         v.add(it.next());
       }
     }
@@ -335,17 +335,17 @@ class FSDirectory implements FSConstants {
     directoryMetrics = MetricsUtil.createRecord(metricsContext, "FSDirectory");
   }
 
-  void loadFSImage( Collection<File> dataDirs,
-                    StartupOption startOpt ) throws IOException {
+  void loadFSImage(Collection<File> dataDirs,
+                   StartupOption startOpt) throws IOException {
     // format before starting up if requested
-    if( startOpt == StartupOption.FORMAT ) {
-      fsImage.setStorageDirectories( dataDirs );
+    if (startOpt == StartupOption.FORMAT) {
+      fsImage.setStorageDirectories(dataDirs);
       fsImage.format();
       startOpt = StartupOption.REGULAR;
     }
     try {
-      fsImage.recoverTransitionRead( dataDirs, startOpt );
-    } catch( IOException e ) {
+      fsImage.recoverTransitionRead(dataDirs, startOpt);
+    } catch(IOException e) {
       fsImage.close();
       throw e;
     }
@@ -371,7 +371,7 @@ class FSDirectory implements FSConstants {
    * Block until the object is ready to be used.
    */
   void waitForReady() {
-    if (! ready) {
+    if (!ready) {
       synchronized (this) {
         while (!ready) {
           try {
@@ -391,20 +391,20 @@ class FSDirectory implements FSConstants {
 
     // Always do an implicit mkdirs for parent directory tree
     String pathString = path.toString();
-    if( ! mkdirs(new Path(pathString).getParent().toString()) ) {
+    if (!mkdirs(new Path(pathString).getParent().toString())) {
       return false;
     }
-    INode newNode = new INode( new File(pathString).getName(), blocks, replication);
-    if( ! unprotectedAddFile(path, newNode) ) {
+    INode newNode = new INode(new File(pathString).getName(), blocks, replication);
+    if (!unprotectedAddFile(path, newNode)) {
       NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
                                    +"failed to add "+path+" with "
-                                   +blocks.length+" blocks to the file system" );
+                                   +blocks.length+" blocks to the file system");
       return false;
     }
     // add create file record to log
-    fsImage.getEditLog().logCreateFile( newNode );
+    fsImage.getEditLog().logCreateFile(newNode);
     NameNode.stateChangeLog.debug("DIR* FSDirectory.addFile: "
-                                  +path+" with "+blocks.length+" blocks is added to the file system" );
+                                  +path+" with "+blocks.length+" blocks is added to the file system");
     return true;
   }
     
@@ -413,7 +413,7 @@ class FSDirectory implements FSConstants {
   boolean unprotectedAddFile(UTF8 path, INode newNode) {
     synchronized (rootDir) {
       try {
-        if( rootDir.addNode(path.toString(), newNode ) != null ) {
+        if (rootDir.addNode(path.toString(), newNode) != null) {
           int nrBlocks = (newNode.blocks == null) ? 0 : newNode.blocks.length;
           // Add file->block mapping
           for (int i = 0; i < nrBlocks; i++)
@@ -422,15 +422,15 @@ class FSDirectory implements FSConstants {
         } else {
           return false;
         }
-      } catch (FileNotFoundException e ) {
+      } catch (FileNotFoundException e) {
         return false;
       }
     }
   }
     
-  boolean unprotectedAddFile(UTF8 path, Block[] blocks, short replication ) {
-    return unprotectedAddFile( path,  
-                               new INode( path.toString(), blocks, replication ));
+  boolean unprotectedAddFile(UTF8 path, Block[] blocks, short replication) {
+    return unprotectedAddFile(path,  
+                              new INode(path.toString(), blocks, replication));
   }
 
   /**
@@ -438,9 +438,9 @@ class FSDirectory implements FSConstants {
    */
   public boolean renameTo(UTF8 src, UTF8 dst) {
     NameNode.stateChangeLog.debug("DIR* FSDirectory.renameTo: "
-                                  +src+" to "+dst );
+                                  +src+" to "+dst);
     waitForReady();
-    if( ! unprotectedRenameTo(src, dst) )
+    if (!unprotectedRenameTo(src, dst))
       return false;
     fsImage.getEditLog().logRename(src, dst);
     return true;
@@ -455,29 +455,29 @@ class FSDirectory implements FSConstants {
       INode renamedNode = rootDir.getNode(srcStr);
       if (renamedNode == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                     +"failed to rename "+src+" to "+dst+ " because source does not exist" );
+                                     +"failed to rename "+src+" to "+dst+ " because source does not exist");
         return false;
       }
       if (isDir(dst)) {
         dstStr += "/" + new File(srcStr).getName();
       }
-      if( rootDir.getNode(dstStr.toString()) != null ) {
+      if (rootDir.getNode(dstStr.toString()) != null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                     +"failed to rename "+src+" to "+dstStr+ " because destination exists" );
+                                     +"failed to rename "+src+" to "+dstStr+ " because destination exists");
         return false;
       }
       renamedNode.removeNode();
             
       // the renamed node can be reused now
       try {
-        if( rootDir.addNode(dstStr, renamedNode ) != null ) {
+        if (rootDir.addNode(dstStr, renamedNode) != null) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedRenameTo: "
-                                        +src+" is renamed to "+dst );
+                                        +src+" is renamed to "+dst);
           return true;
         }
-      } catch (FileNotFoundException e ) {
+      } catch (FileNotFoundException e) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedRenameTo: "
-                                     +"failed to rename "+src+" to "+dst );
+                                     +"failed to rename "+src+" to "+dst);
         try {
           rootDir.addNode(srcStr, renamedNode); // put it back
         }catch(FileNotFoundException e2) {                
@@ -497,33 +497,33 @@ class FSDirectory implements FSConstants {
    * @return array of file blocks
    * @throws IOException
    */
-  Block[] setReplication( String src, 
-                          short replication,
-                          Vector<Integer> oldReplication
-                          ) throws IOException {
+  Block[] setReplication(String src, 
+                         short replication,
+                         Vector<Integer> oldReplication
+                         ) throws IOException {
     waitForReady();
-    Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication );
-    if( fileBlocks != null )  // log replication change
-      fsImage.getEditLog().logSetReplication( src, replication );
+    Block[] fileBlocks = unprotectedSetReplication(src, replication, oldReplication);
+    if (fileBlocks != null)  // log replication change
+      fsImage.getEditLog().logSetReplication(src, replication);
     return fileBlocks;
   }
 
-  Block[] unprotectedSetReplication(  String src, 
-                                      short replication,
-                                      Vector<Integer> oldReplication
-                                      ) throws IOException {
-    if( oldReplication == null )
+  Block[] unprotectedSetReplication( String src, 
+                                     short replication,
+                                     Vector<Integer> oldReplication
+                                     ) throws IOException {
+    if (oldReplication == null)
       oldReplication = new Vector<Integer>();
     oldReplication.setSize(1);
-    oldReplication.set( 0, new Integer(-1) );
+    oldReplication.set(0, new Integer(-1));
     Block[] fileBlocks = null;
     synchronized(rootDir) {
       INode fileNode = rootDir.getNode(src);
       if (fileNode == null)
         return null;
-      if( fileNode.isDir() )
+      if (fileNode.isDir())
         return null;
-      oldReplication.set( 0, new Integer( fileNode.blockReplication ));
+      oldReplication.set(0, new Integer(fileNode.blockReplication));
       fileNode.blockReplication = replication;
       fileBlocks = fileNode.blocks;
     }
@@ -555,11 +555,11 @@ class FSDirectory implements FSConstants {
    */
   public Block[] delete(UTF8 src) {
     NameNode.stateChangeLog.debug("DIR* FSDirectory.delete: "
-                                  +src );
+                                  +src);
     waitForReady();
     Block[] blocks = unprotectedDelete(src); 
-    if( blocks != null )
-      fsImage.getEditLog().logDelete( src );
+    if (blocks != null)
+      fsImage.getEditLog().logDelete(src);
     return blocks;
   }
 
@@ -570,20 +570,20 @@ class FSDirectory implements FSConstants {
       INode targetNode = rootDir.getNode(src.toString());
       if (targetNode == null) {
         NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
-                                     +"failed to remove "+src+" because it does not exist" );
+                                     +"failed to remove "+src+" because it does not exist");
         return null;
       } else {
         //
         // Remove the node from the namespace and GC all
         // the blocks underneath the node.
         //
-        if (! targetNode.removeNode()) {
+        if (!targetNode.removeNode()) {
           NameNode.stateChangeLog.warn("DIR* FSDirectory.unprotectedDelete: "
-                                       +"failed to remove "+src+" because it does not have a parent" );
+                                       +"failed to remove "+src+" because it does not have a parent");
           return null;
         } else {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.unprotectedDelete: "
-                                        +src+" is removed" );
+                                        +src+" is removed");
           Vector<Block> v = new Vector<Block>();
           targetNode.collectSubtreeBlocks(v);
           for (Block b : v) {
@@ -675,7 +675,7 @@ class FSDirectory implements FSConstants {
     String srcs = normalizePath(src);
     synchronized (rootDir) {
       if (srcs.startsWith("/") && 
-          ! srcs.endsWith("/") && 
+          !srcs.endsWith("/") && 
           rootDir.getNode(srcs) == null) {
         return true;
       } else {
@@ -722,8 +722,8 @@ class FSDirectory implements FSConstants {
         INode inserted = unprotectedMkdir(cur);
         if (inserted != null) {
           NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
-                                        +"created directory "+cur );
-          fsImage.getEditLog().logMkDir( inserted );
+                                        +"created directory "+cur);
+          fsImage.getEditLog().logMkDir(inserted);
         } else { // otherwise cur exists, verify that it is a directory
           if (!isDir(new UTF8(cur))) {
             NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
@@ -731,7 +731,7 @@ class FSDirectory implements FSConstants {
             return false;
           } 
         }
-      } catch (FileNotFoundException e ) {
+      } catch (FileNotFoundException e) {
         NameNode.stateChangeLog.debug("DIR* FSDirectory.mkdirs: "
                                       +"failed to create directory "+src);
         return false;

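For readers tracing the getNode/addNode hunks above: each INode keeps its children in a TreeMap keyed by component name, and an absolute path is resolved one component at a time. A toy sketch of that lookup (MiniINode is an illustrative stand-in, not the class itself):

    import java.util.TreeMap;

    class MiniINode {
      final String name;
      TreeMap<String, MiniINode> children;           // null for a plain file
      MiniINode(String name) { this.name = name; }

      // Resolve an absolute path like "/a/b/c" against this root node.
      MiniINode getNode(String target) {
        if (target == null || !target.startsWith("/")) {
          return null;                               // only absolute paths
        }
        MiniINode cur = this;
        String[] parts = target.split("/");
        for (int i = 0; i < parts.length; i++) {
          if (parts[i].length() == 0) continue;      // skip the empty leading part
          if (cur.children == null) return null;     // hit a file before the end
          cur = cur.children.get(parts[i]);
          if (cur == null) return null;              // missing component
        }
        return cur;
      }
    }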
+ 77 - 77
src/java/org/apache/hadoop/dfs/FSEditLog.java

@@ -52,8 +52,8 @@ class FSEditLog {
   static class EditLogOutputStream extends DataOutputStream {
     private FileDescriptor fd;
 
-    EditLogOutputStream( File name ) throws IOException {
-      super( new FileOutputStream( name, true )); // open for append
+    EditLogOutputStream(File name) throws IOException {
+      super(new FileOutputStream(name, true)); // open for append
       this.fd = ((FileOutputStream)out).getFD();
     }
 
@@ -63,21 +63,21 @@ class FSEditLog {
     }
 
     void create() throws IOException {
-      writeInt( FSConstants.LAYOUT_VERSION );
+      writeInt(FSConstants.LAYOUT_VERSION);
       flushAndSync();
     }
   }
 
-  FSEditLog( FSImage image ) {
+  FSEditLog(FSImage image) {
     fsimage = image;
   }
 
-  private File getEditFile( int idx ) {
-    return fsimage.getEditFile( idx );
+  private File getEditFile(int idx) {
+    return fsimage.getEditFile(idx);
   }
 
-  private File getEditNewFile( int idx ) {
-    return fsimage.getEditNewFile( idx );
+  private File getEditNewFile(int idx) {
+    return fsimage.getEditNewFile(idx);
   }
   
   private int getNumStorageDirs() {
@@ -96,23 +96,23 @@ class FSEditLog {
    */
   void open() throws IOException {
     int size = getNumStorageDirs();
-    if( editStreams == null )
-      editStreams = new ArrayList<EditLogOutputStream>( size );
+    if (editStreams == null)
+      editStreams = new ArrayList<EditLogOutputStream>(size);
     for (int idx = 0; idx < size; idx++) {
-      File eFile = getEditFile( idx );
+      File eFile = getEditFile(idx);
       try {
-        EditLogOutputStream eStream = new EditLogOutputStream( eFile );
-        editStreams.add( eStream );
+        EditLogOutputStream eStream = new EditLogOutputStream(eFile);
+        editStreams.add(eStream);
       } catch (IOException e) {
-        FSNamesystem.LOG.warn( "Unable to open edit log file " + eFile );
+        FSNamesystem.LOG.warn("Unable to open edit log file " + eFile);
         processIOError(idx); 
         idx--; 
       }
     }
   }
 
-  void createEditLogFile( File name ) throws IOException {
-    EditLogOutputStream eStream = new EditLogOutputStream( name );
+  void createEditLogFile(File name) throws IOException {
+    EditLogOutputStream eStream = new EditLogOutputStream(name);
     eStream.create();
     eStream.flushAndSync();
     eStream.close();
@@ -123,9 +123,9 @@ class FSEditLog {
    */
   void createNewIfMissing() throws IOException {
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      File newFile = getEditNewFile( idx );
-      if( ! newFile.exists() )
-        createEditLogFile( newFile );
+      File newFile = getEditNewFile(idx);
+      if (!newFile.exists())
+        createEditLogFile(newFile);
     }
   }
   
@@ -137,7 +137,7 @@ class FSEditLog {
       return;
     }
     for (int idx = 0; idx < editStreams.size(); idx++) {
-      EditLogOutputStream eStream = editStreams.get( idx );
+      EditLogOutputStream eStream = editStreams.get(idx);
       try {
         eStream.flushAndSync();
         eStream.close();
@@ -162,7 +162,7 @@ class FSEditLog {
     assert(index < getNumStorageDirs());
     assert(getNumStorageDirs() == editStreams.size());
 
-    editStreams.remove( index );
+    editStreams.remove(index);
     //
     // Invoke the ioerror routine of the fsimage
     //
@@ -174,7 +174,7 @@ class FSEditLog {
    */
   boolean existsNew() throws IOException {
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      if (getEditNewFile( idx ).exists()) { 
+      if (getEditNewFile(idx).exists()) { 
         return true;
       }
     }
@@ -186,7 +186,7 @@ class FSEditLog {
    * This is where we apply edits that we've been writing to disk all
    * along.
    */
-  int loadFSEdits( File edits ) throws IOException {
+  int loadFSEdits(File edits) throws IOException {
     FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
     FSDirectory fsDir = fsNamesys.dir;
     int numEdits = 0;
@@ -197,7 +197,7 @@ class FSEditLog {
                                                new BufferedInputStream(
                                                                        new FileInputStream(edits)));
       // Read log file version. Could be missing. 
-      in.mark( 4 );
+      in.mark(4);
       // If edits log is greater than 2G, available method will return negative
       // numbers, so we avoid having to call available
       boolean available = true;
@@ -208,16 +208,16 @@ class FSEditLog {
       }
       if (available) {
         in.reset();
-        if( logVersion >= 0 )
+        if (logVersion >= 0)
           logVersion = 0;
         else
           logVersion = in.readInt();
-        if( logVersion < FSConstants.LAYOUT_VERSION ) // future version
+        if (logVersion < FSConstants.LAYOUT_VERSION) // future version
           throw new IOException(
                                 "Unexpected version of the file system log file: "
                                 + logVersion
                                 + ". Current version = " 
-                                + FSConstants.LAYOUT_VERSION + "." );
+                                + FSConstants.LAYOUT_VERSION + ".");
       }
       
       short replication = fsNamesys.getDefaultReplication();
@@ -236,20 +236,20 @@ class FSEditLog {
             ArrayWritable aw = null;
             Writable writables[];
             // version 0 does not support per file replication
-            if( logVersion >= 0 )
+            if (logVersion >= 0)
               name.readFields(in);  // read name only
             else {  // other versions do
               // get name and replication
               aw = new ArrayWritable(UTF8.class);
               aw.readFields(in);
               writables = aw.get(); 
-              if( writables.length != 2 )
+              if (writables.length != 2)
                 throw new IOException("Incorrect data fortmat. " 
                                       + "Name & replication pair expected");
               name = (UTF8) writables[0];
               replication = Short.parseShort(
                                              ((UTF8)writables[1]).toString());
-              replication = adjustReplication( replication );
+              replication = adjustReplication(replication);
             }
             // get blocks
             aw = new ArrayWritable(Block.class);
@@ -258,7 +258,7 @@ class FSEditLog {
             Block blocks[] = new Block[writables.length];
             System.arraycopy(writables, 0, blocks, 0, blocks.length);
             // add to the file tree
-            fsDir.unprotectedAddFile(name, blocks, replication );
+            fsDir.unprotectedAddFile(name, blocks, replication);
             break;
           }
           case OP_SET_REPLICATION: {
@@ -266,7 +266,7 @@ class FSEditLog {
             UTF8 repl = new UTF8();
             src.readFields(in);
             repl.readFields(in);
-            replication = adjustReplication( fromLogReplication(repl) );
+            replication = adjustReplication(fromLogReplication(repl));
             fsDir.unprotectedSetReplication(src.toString(), 
                                             replication,
                                             null);
@@ -293,26 +293,26 @@ class FSEditLog {
             break;
           }
           case OP_DATANODE_ADD: {
-            if( logVersion > -3 )
+            if (logVersion > -3)
               throw new IOException("Unexpected opcode " + opcode 
-                                    + " for version " + logVersion );
+                                    + " for version " + logVersion);
             FSImage.DatanodeImage nodeimage = new FSImage.DatanodeImage();
             nodeimage.readFields(in);
             DatanodeDescriptor node = nodeimage.getDatanodeDescriptor();
-            fsNamesys.unprotectedAddDatanode( node );
+            fsNamesys.unprotectedAddDatanode(node);
             break;
           }
           case OP_DATANODE_REMOVE: {
-            if( logVersion > -3 )
+            if (logVersion > -3)
               throw new IOException("Unexpected opcode " + opcode 
-                                    + " for version " + logVersion );
+                                    + " for version " + logVersion);
             DatanodeID nodeID = new DatanodeID();
             nodeID.readFields(in);
-            DatanodeDescriptor node = fsNamesys.getDatanode( nodeID );
-            if( node != null ) {
-              fsNamesys.unprotectedRemoveDatanode( node );
+            DatanodeDescriptor node = fsNamesys.getDatanode(nodeID);
+            if (node != null) {
+              fsNamesys.unprotectedRemoveDatanode(node);
               // physically remove node from datanodeMap
-              fsNamesys.wipeDatanode( nodeID );
+              fsNamesys.wipeDatanode(nodeID);
             }
             break;
           }
@@ -326,19 +326,19 @@ class FSEditLog {
       }
     }
     
-    if( logVersion != FSConstants.LAYOUT_VERSION ) // other version
+    if (logVersion != FSConstants.LAYOUT_VERSION) // other version
       numEdits++; // save this image asap
     return numEdits;
   }
   
-  static short adjustReplication( short replication) {
+  static short adjustReplication(short replication) {
     FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
     short minReplication = fsNamesys.getMinReplication();
-    if( replication<minReplication ) {
+    if (replication<minReplication) {
       replication = minReplication;
     }
     short maxReplication = fsNamesys.getMaxReplication();
-    if( replication>maxReplication ) {
+    if (replication>maxReplication) {
       replication = maxReplication;
     }
     return replication;
@@ -351,14 +351,14 @@ class FSEditLog {
     assert this.getNumEditStreams() > 0 : "no editlog streams";
     for (int idx = 0; idx < editStreams.size(); idx++) {
       EditLogOutputStream eStream;
-      synchronized ( eStream = editStreams.get( idx ) ) {
+      synchronized (eStream = editStreams.get(idx)) {
         try {
           eStream.write(op);
           if (w1 != null) {
-            w1.write( eStream );
+            w1.write(eStream);
           }
           if (w2 != null) {
-            w2.write( eStream );
+            w2.write(eStream);
           }
           eStream.flushAndSync();
         } catch (IOException ie) {
@@ -377,43 +377,43 @@ class FSEditLog {
   /** 
    * Add create file record to edit log
    */
-  void logCreateFile( FSDirectory.INode newNode ) {
+  void logCreateFile(FSDirectory.INode newNode) {
     UTF8 nameReplicationPair[] = new UTF8[] { 
-      new UTF8( newNode.computeName() ), 
-      FSEditLog.toLogReplication( newNode.getReplication() )};
+      new UTF8(newNode.computeName()), 
+      FSEditLog.toLogReplication(newNode.getReplication())};
     logEdit(OP_ADD,
-            new ArrayWritable( UTF8.class, nameReplicationPair ), 
-            new ArrayWritable( Block.class, newNode.getBlocks() ));
+            new ArrayWritable(UTF8.class, nameReplicationPair), 
+            new ArrayWritable(Block.class, newNode.getBlocks()));
   }
   
   /** 
    * Add create directory record to edit log
    */
-  void logMkDir( FSDirectory.INode newNode ) {
-    logEdit(OP_MKDIR, new UTF8( newNode.computeName() ), null );
+  void logMkDir(FSDirectory.INode newNode) {
+    logEdit(OP_MKDIR, new UTF8(newNode.computeName()), null);
   }
   
   /** 
    * Add rename record to edit log
    * TODO: use String parameters until just before writing to disk
    */
-  void logRename( UTF8 src, UTF8 dst ) {
+  void logRename(UTF8 src, UTF8 dst) {
     logEdit(OP_RENAME, src, dst);
   }
   
   /** 
    * Add set replication record to edit log
    */
-  void logSetReplication( String src, short replication ) {
+  void logSetReplication(String src, short replication) {
     logEdit(OP_SET_REPLICATION, 
             new UTF8(src), 
-            FSEditLog.toLogReplication( replication ));
+            FSEditLog.toLogReplication(replication));
   }
   
   /** 
    * Add delete file record to edit log
    */
-  void logDelete( UTF8 src ) {
+  void logDelete(UTF8 src) {
     logEdit(OP_DELETE, src, null);
   }
   
@@ -421,23 +421,23 @@ class FSEditLog {
    * Creates a record in edit log corresponding to a new data node
    * registration event.
    */
-  void logAddDatanode( DatanodeDescriptor node ) {
-    logEdit( OP_DATANODE_ADD, new FSImage.DatanodeImage(node), null );
+  void logAddDatanode(DatanodeDescriptor node) {
+    logEdit(OP_DATANODE_ADD, new FSImage.DatanodeImage(node), null);
   }
   
   /** 
    * Creates a record in edit log corresponding to a data node
    * removal event.
    */
-  void logRemoveDatanode( DatanodeID nodeID ) {
-    logEdit( OP_DATANODE_REMOVE, new DatanodeID( nodeID ), null );
+  void logRemoveDatanode(DatanodeID nodeID) {
+    logEdit(OP_DATANODE_REMOVE, new DatanodeID(nodeID), null);
   }
   
-  static UTF8 toLogReplication( short replication ) {
-    return new UTF8( Short.toString(replication));
+  static UTF8 toLogReplication(short replication) {
+    return new UTF8(Short.toString(replication));
   }
   
-  static short fromLogReplication( UTF8 replication ) {
+  static short fromLogReplication(UTF8 replication) {
     return Short.parseShort(replication.toString());
   }
 
@@ -448,9 +448,9 @@ class FSEditLog {
     assert(getNumStorageDirs() == editStreams.size());
     long size = 0;
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      synchronized (editStreams.get( idx )) {
-        assert(size == 0 || size == getEditFile( idx ).length());
-        size = getEditFile( idx ).length();
+      synchronized (editStreams.get(idx)) {
+        assert(size == 0 || size == getEditFile(idx).length());
+        size = getEditFile(idx).length();
       }
     }
     return size;
@@ -472,11 +472,11 @@ class FSEditLog {
     //
     // Open edits.new
     //
-    for (int idx = 0; idx < getNumStorageDirs(); idx++ ) {
+    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
       try {
-        EditLogOutputStream eStream = new EditLogOutputStream( getEditNewFile( idx ));
+        EditLogOutputStream eStream = new EditLogOutputStream(getEditNewFile(idx));
         eStream.create();
-        editStreams.add( eStream );
+        editStreams.add(eStream);
       } catch (IOException e) {
         processIOError(idx);
         idx--;
@@ -501,14 +501,14 @@ class FSEditLog {
     //
     // Delete edits and rename edits.new to edits.
     //
-    for (int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      if (!getEditNewFile( idx ).renameTo(getEditFile( idx ))) {
+    for (int idx = 0; idx < getNumStorageDirs(); idx++) {
+      if (!getEditNewFile(idx).renameTo(getEditFile(idx))) {
         //
         // renameTo() fails on Windows if the destination
         // file exists.
         //
-        getEditFile( idx ).delete();
-        if (!getEditNewFile( idx ).renameTo(getEditFile( idx ))) {
+        getEditFile(idx).delete();
+        if (!getEditNewFile(idx).renameTo(getEditFile(idx))) {
           processIOError(idx); 
           idx--; 
         }
@@ -524,6 +524,6 @@ class FSEditLog {
    * Return the name of the edit file
    */
   File getFsEditName() throws IOException {
-    return getEditFile( 0 );
+    return getEditFile(0);
   }
 }

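The EditLogOutputStream hunks above all preserve one invariant: every logged edit is flushed and forced to disk before the call returns, via flush() followed by FileDescriptor.sync(). A minimal sketch of that append-and-sync pattern on its own (AppendOnlyLog and logRecord are illustrative names, not part of the patch):

    import java.io.DataOutputStream;
    import java.io.File;
    import java.io.FileDescriptor;
    import java.io.FileOutputStream;
    import java.io.IOException;

    class AppendOnlyLog extends DataOutputStream {
      private final FileDescriptor fd;

      AppendOnlyLog(File name) throws IOException {
        super(new FileOutputStream(name, true));     // true = open for append
        this.fd = ((FileOutputStream) out).getFD();
      }

      void logRecord(byte opcode, byte[] payload) throws IOException {
        writeByte(opcode);
        write(payload);
        flush();                                     // drain stream buffers
        fd.sync();                                   // force bytes to the device
      }
    }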
+ 235 - 235
src/java/org/apache/hadoop/dfs/FSImage.java

@@ -59,7 +59,7 @@ class FSImage extends Storage {
     EDITS_NEW ("edits.new");
     
     private String fileName = null;
-    private NameNodeFile( String name ) {this.fileName = name;}
+    private NameNodeFile(String name) {this.fileName = name;}
     String getName() {return fileName;}
   }
   
@@ -70,53 +70,53 @@ class FSImage extends Storage {
   /**
    */
   FSImage() {
-    super( NodeType.NAME_NODE );
-    this.editLog = new FSEditLog( this );
+    super(NodeType.NAME_NODE);
+    this.editLog = new FSEditLog(this);
   }
 
   /**
    */
-  FSImage( Collection<File> fsDirs ) throws IOException {
+  FSImage(Collection<File> fsDirs) throws IOException {
     this();
-    setStorageDirectories( fsDirs );
+    setStorageDirectories(fsDirs);
   }
 
-  FSImage( StorageInfo storageInfo ) {
-    super( NodeType.NAME_NODE, storageInfo );
+  FSImage(StorageInfo storageInfo) {
+    super(NodeType.NAME_NODE, storageInfo);
   }
 
   /**
    * Represents an Image (image and edit file).
    */
-  FSImage( File imageDir ) throws IOException {
+  FSImage(File imageDir) throws IOException {
     this();
     ArrayList<File> dirs = new ArrayList<File>(1);
-    dirs.add( imageDir );
-    setStorageDirectories( dirs );
+    dirs.add(imageDir);
+    setStorageDirectories(dirs);
   }
   
-  void setStorageDirectories( Collection<File> fsDirs ) throws IOException {
-    this.storageDirs = new ArrayList<StorageDirectory>( fsDirs.size() );
-    for( Iterator<File> it = fsDirs.iterator(); it.hasNext(); )
-      this.addStorageDir( new StorageDirectory( it.next() ));
+  void setStorageDirectories(Collection<File> fsDirs) throws IOException {
+    this.storageDirs = new ArrayList<StorageDirectory>(fsDirs.size());
+    for(Iterator<File> it = fsDirs.iterator(); it.hasNext();)
+      this.addStorageDir(new StorageDirectory(it.next()));
   }
 
   /**
    */
-  File getImageFile( int imageDirIdx, NameNodeFile type ) {
-    return getImageFile( getStorageDir( imageDirIdx ), type );
+  File getImageFile(int imageDirIdx, NameNodeFile type) {
+    return getImageFile(getStorageDir(imageDirIdx), type);
   }
   
-  static File getImageFile( StorageDirectory sd, NameNodeFile type ) {
-    return new File( sd.getCurrentDir(), type.getName() );
+  static File getImageFile(StorageDirectory sd, NameNodeFile type) {
+    return new File(sd.getCurrentDir(), type.getName());
   }
   
-  File getEditFile( int idx ) {
-    return getImageFile( idx, NameNodeFile.EDITS );
+  File getEditFile(int idx) {
+    return getImageFile(idx, NameNodeFile.EDITS);
   }
   
-  File getEditNewFile( int idx ) {
-    return getImageFile( idx, NameNodeFile.EDITS_NEW );
+  File getEditNewFile(int idx) {
+    return getImageFile(idx, NameNodeFile.EDITS_NEW);
   }
   
   /**
@@ -129,42 +129,42 @@ class FSImage extends Storage {
    * @param startOpt startup option
    * @throws IOException
    */
-  void recoverTransitionRead( Collection<File> dataDirs,
-                              StartupOption startOpt
-                              ) throws IOException {
+  void recoverTransitionRead(Collection<File> dataDirs,
+                             StartupOption startOpt
+                             ) throws IOException {
     assert startOpt != StartupOption.FORMAT : 
       "NameNode formatting should be performed before reading the image";
     // 1. For each data directory calculate its state and 
     // check whether all is consistent before transitioning.
-    this.storageDirs = new ArrayList<StorageDirectory>( dataDirs.size() );
+    this.storageDirs = new ArrayList<StorageDirectory>(dataDirs.size());
     AbstractList<StorageState> dataDirStates = 
-      new ArrayList<StorageState>( dataDirs.size() );
+      new ArrayList<StorageState>(dataDirs.size());
     boolean isFormatted = false;
-    for( Iterator<File> it = dataDirs.iterator(); it.hasNext(); ) {
+    for(Iterator<File> it = dataDirs.iterator(); it.hasNext();) {
       File dataDir = it.next();
-      StorageDirectory sd = new StorageDirectory( dataDir );
+      StorageDirectory sd = new StorageDirectory(dataDir);
       StorageState curState;
       try {
-        curState = sd.analyzeStorage( startOpt );
+        curState = sd.analyzeStorage(startOpt);
         // sd is locked but not opened
-        switch( curState ) {
+        switch(curState) {
         case NON_EXISTENT:
           // name-node fails if any of the configured storage dirs are missing
-          throw new InconsistentFSStateException( sd.root,
-                                                  "storage directory does not exist or is not accessible." );
+          throw new InconsistentFSStateException(sd.root,
+                                                 "storage directory does not exist or is not accessible.");
         case NOT_FORMATTED:
           break;
         case CONVERT:
-          if( convertLayout( sd ) ) // need to reformat empty image
+          if (convertLayout(sd)) // need to reformat empty image
             curState = StorageState.NOT_FORMATTED;
           break;
         case NORMAL:
           break;
         default:  // recovery is possible
-          sd.doRecover( curState );      
+          sd.doRecover(curState);      
         }
-        if( curState != StorageState.NOT_FORMATTED 
-            && startOpt != StartupOption.ROLLBACK ) {
+        if (curState != StorageState.NOT_FORMATTED 
+            && startOpt != StartupOption.ROLLBACK) {
           sd.read(); // read and verify consistency with other directories
           isFormatted = true;
         }
@@ -173,34 +173,34 @@ class FSImage extends Storage {
         throw ioe;
       }
       // add to the storage list
-      addStorageDir( sd );
-      dataDirStates.add( curState );
+      addStorageDir(sd);
+      dataDirStates.add(curState);
     }
 
-    if( dataDirs.size() == 0 )  // none of the data dirs exist
-      throw new IOException( 
-                            "All specified directories are not accessible or do not exist." );
-    if( ! isFormatted && startOpt != StartupOption.ROLLBACK )
-      throw new IOException( "NameNode is not formatted." );
-    if( startOpt != StartupOption.UPGRADE
+    if (dataDirs.size() == 0)  // none of the data dirs exist
+      throw new IOException(
+                            "All specified directories are not accessible or do not exist.");
+    if (!isFormatted && startOpt != StartupOption.ROLLBACK)
+      throw new IOException("NameNode is not formatted.");
+    if (startOpt != StartupOption.UPGRADE
         && layoutVersion < LAST_PRE_UPGRADE_LAYOUT_VERSION
-        && layoutVersion != FSConstants.LAYOUT_VERSION )
-      throw new IOException( 
+        && layoutVersion != FSConstants.LAYOUT_VERSION)
+      throw new IOException(
                             "\nFile system image contains an old layout version " + layoutVersion
                             + ".\nAn upgrade to version " + FSConstants.LAYOUT_VERSION
-                            + " is required.\nPlease restart NameNode with -upgrade option." );
+                            + " is required.\nPlease restart NameNode with -upgrade option.");
 
     // 2. Format unformatted dirs.
     this.checkpointTime = 0L;
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      StorageState curState = dataDirStates.get( idx );
-      switch( curState ) {
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      StorageState curState = dataDirStates.get(idx);
+      switch(curState) {
       case NON_EXISTENT:
         assert false : StorageState.NON_EXISTENT + " state cannot be here";
       case NOT_FORMATTED:
-        LOG.info( "Storage directory " + sd.root + " is not formatted." );
-        LOG.info( "Formatting ..." );
+        LOG.info("Storage directory " + sd.root + " is not formatted.");
+        LOG.info("Formatting ...");
        sd.clearDirectory(); // create empty current dir
         break;
       default:
@@ -209,7 +209,7 @@ class FSImage extends Storage {
     }
 
     // 3. Do transitions
-    switch( startOpt ) {
+    switch(startOpt) {
     case UPGRADE:
       doUpgrade();
       break;
@@ -217,7 +217,7 @@ class FSImage extends Storage {
       doRollback();
       // and now load that image
     case REGULAR:
-      if( loadFSImage() )
+      if (loadFSImage())
         saveFSImage();
       else
         editLog.open();
@@ -229,12 +229,12 @@ class FSImage extends Storage {
   private void doUpgrade() throws IOException {
     // Upgrade is allowed only if there are 
     // no previous fs states in any of the directories
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      if( sd.getPreviousDir().exists() )
-        throw new InconsistentFSStateException( sd.root,
-                                                "previous fs state should not exist during upgrade. "
-                                                + "Finalize or rollback first." );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      if (sd.getPreviousDir().exists())
+        throw new InconsistentFSStateException(sd.root,
+                                               "previous fs state should not exist during upgrade. "
+                                               + "Finalize or rollback first.");
     }
 
     // load the latest image
@@ -246,32 +246,32 @@ class FSImage extends Storage {
     int oldLV = this.getLayoutVersion();
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.checkpointTime = FSNamesystem.now();
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      LOG.info( "Upgrading image directory " + sd.root 
-                + ".\n   old LV = " + oldLV
-                + "; old CTime = " + oldCTime
-                + ".\n   new LV = " + this.getLayoutVersion()
-                + "; new CTime = " + this.getCTime() );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      LOG.info("Upgrading image directory " + sd.root 
+               + ".\n   old LV = " + oldLV
+               + "; old CTime = " + oldCTime
+               + ".\n   new LV = " + this.getLayoutVersion()
+               + "; new CTime = " + this.getCTime());
       File curDir = sd.getCurrentDir();
       File prevDir = sd.getPreviousDir();
       File tmpDir = sd.getPreviousTmp();
       assert curDir.exists() : "Current directory must exist.";
-      assert ! prevDir.exists() : "previous directory must not exist.";
-      assert ! tmpDir.exists() : "previous.tmp directory must not exist.";
+      assert !prevDir.exists() : "previous directory must not exist.";
+      assert !tmpDir.exists() : "previous.tmp directory must not exist.";
       // rename current to tmp
-      rename( curDir, tmpDir );
+      rename(curDir, tmpDir);
       // save new image
-      if( ! curDir.mkdir() )
-        throw new IOException("Cannot create directory " + curDir );
-      saveFSImage( getImageFile( sd, NameNodeFile.IMAGE ));
-      editLog.createEditLogFile( getImageFile( sd, NameNodeFile.EDITS ));
+      if (!curDir.mkdir())
+        throw new IOException("Cannot create directory " + curDir);
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
       // write version and time files
       sd.write();
       // rename tmp to previous
-      rename( tmpDir, prevDir );
+      rename(tmpDir, prevDir);
       isUpgradeFinalized = false;
-      LOG.info( "Upgrade of " + sd.root + " is complete." );
+      LOG.info("Upgrade of " + sd.root + " is complete.");
     }
     editLog.open();
   }
@@ -283,91 +283,91 @@ class FSImage extends Storage {
     boolean canRollback = false;
     FSImage prevState = new FSImage();
     prevState.layoutVersion = FSConstants.LAYOUT_VERSION;
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
       File prevDir = sd.getPreviousDir();
-      if( ! prevDir.exists() ) {  // use current directory then
-        LOG.info( "Storage directory " + sd.root
-                  + " does not contain previous fs state." );
+      if (!prevDir.exists()) {  // use current directory then
+        LOG.info("Storage directory " + sd.root
+                 + " does not contain previous fs state.");
         sd.read(); // read and verify consistency with other directories
         continue;
       }
-      StorageDirectory sdPrev = prevState.new StorageDirectory( sd.root );
-      sdPrev.read( sdPrev.getPreviousVersionFile() );  // read and verify consistency of the prev dir
+      StorageDirectory sdPrev = prevState.new StorageDirectory(sd.root);
+      sdPrev.read(sdPrev.getPreviousVersionFile());  // read and verify consistency of the prev dir
       canRollback = true;
     }
-    if( ! canRollback )
-      throw new IOException( "Cannot rollback. " 
-                             + "None of the storage directories contain previous fs state." );
+    if (!canRollback)
+      throw new IOException("Cannot rollback. " 
+                            + "None of the storage directories contain previous fs state.");
 
     // Now that we know all directories are going to be consistent
     // Do rollback for each directory containing previous state
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
       File prevDir = sd.getPreviousDir();
-      if( ! prevDir.exists() )
+      if (!prevDir.exists())
         continue;
 
-      LOG.info( "Rolling back storage directory " + sd.root 
-                + ".\n   new LV = " + prevState.getLayoutVersion()
-                + "; new CTime = " + prevState.getCTime() );
+      LOG.info("Rolling back storage directory " + sd.root 
+               + ".\n   new LV = " + prevState.getLayoutVersion()
+               + "; new CTime = " + prevState.getCTime());
       File tmpDir = sd.getRemovedTmp();
-      assert ! tmpDir.exists() : "removed.tmp directory must not exist.";
+      assert !tmpDir.exists() : "removed.tmp directory must not exist.";
       // rename current to tmp
       File curDir = sd.getCurrentDir();
       assert curDir.exists() : "Current directory must exist.";
-      rename( curDir, tmpDir );
+      rename(curDir, tmpDir);
       // rename previous to current
-      rename( prevDir, curDir );
+      rename(prevDir, curDir);
 
       // delete tmp dir
-      deleteDir( tmpDir );
-      LOG.info( "Rollback of " + sd.root + " is complete." );
+      deleteDir(tmpDir);
+      LOG.info("Rollback of " + sd.root + " is complete.");
     }
     isUpgradeFinalized = true;
   }
 
-  private void doFinalize( StorageDirectory sd ) throws IOException {
+  private void doFinalize(StorageDirectory sd) throws IOException {
     File prevDir = sd.getPreviousDir();
-    if( ! prevDir.exists() )
+    if (!prevDir.exists())
       return; // already discarded
-    LOG.info( "Finalizing upgrade for storage directory " 
-              + sd.root 
-              + ".\n   cur LV = " + this.getLayoutVersion()
-              + "; cur CTime = " + this.getCTime() );
+    LOG.info("Finalizing upgrade for storage directory " 
+             + sd.root 
+             + ".\n   cur LV = " + this.getLayoutVersion()
+             + "; cur CTime = " + this.getCTime());
     assert sd.getCurrentDir().exists() : "Current directory must exist.";
     final File tmpDir = sd.getFinalizedTmp();
     // rename previous to tmp and remove
-    rename( prevDir, tmpDir );
-    deleteDir( tmpDir );
+    rename(prevDir, tmpDir);
+    deleteDir(tmpDir);
     isUpgradeFinalized = true;
-    LOG.info( "Finalize upgrade for " + sd.root + " is complete." );
+    LOG.info("Finalize upgrade for " + sd.root + " is complete.");
   }
 
   void finalizeUpgrade() throws IOException {
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ )
-      doFinalize( getStorageDir( idx ));
+    for(int idx = 0; idx < getNumStorageDirs(); idx++)
+      doFinalize(getStorageDir(idx));
   }
 
   boolean isUpgradeFinalized() {
     return isUpgradeFinalized;
   }
 
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.getFields( props, sd );
-    if( layoutVersion == 0 )
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.getFields(props, sd);
+    if (layoutVersion == 0)
       throw new IOException("NameNode directory " 
-                            + sd.root + " is not formatted." );
-    this.checkpointTime = readCheckpointTime( sd );
+                            + sd.root + " is not formatted.");
+    this.checkpointTime = readCheckpointTime(sd);
   }
 
-  long readCheckpointTime( StorageDirectory sd ) throws IOException {
-    File timeFile = getImageFile( sd, NameNodeFile.TIME );
+  long readCheckpointTime(StorageDirectory sd) throws IOException {
+    File timeFile = getImageFile(sd, NameNodeFile.TIME);
     long timeStamp = 0L;
-    if( timeFile.exists() && timeFile.canRead() ) {
-      DataInputStream in = new DataInputStream( new FileInputStream(timeFile) );
+    if (timeFile.exists() && timeFile.canRead()) {
+      DataInputStream in = new DataInputStream(new FileInputStream(timeFile));
       try {
         timeStamp = in.readLong();
       } finally {
@@ -387,11 +387,11 @@ class FSImage extends Storage {
    * @param sd storage directory
    * @throws IOException
    */
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                            ) throws IOException {
-    super.setFields( props, sd );
-    writeCheckpointTime( sd );
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    super.setFields(props, sd);
+    writeCheckpointTime(sd);
   }
 
   /**
@@ -400,15 +400,15 @@ class FSImage extends Storage {
    * @param sd
    * @throws IOException
    */
-  void writeCheckpointTime( StorageDirectory sd ) throws IOException {
-    if( checkpointTime < 0L )
+  void writeCheckpointTime(StorageDirectory sd) throws IOException {
+    if (checkpointTime < 0L)
       return; // do not write negative time
-    File timeFile = getImageFile( sd, NameNodeFile.TIME );
+    File timeFile = getImageFile(sd, NameNodeFile.TIME);
     if (timeFile.exists()) { timeFile.delete(); }
     DataOutputStream out = new DataOutputStream(
                                                 new FileOutputStream(timeFile));
     try {
-      out.writeLong( checkpointTime );
+      out.writeLong(checkpointTime);
     } finally {
       out.close();
     }
@@ -422,41 +422,41 @@ class FSImage extends Storage {
    */
   void processIOError(int index) throws IOException {
     int nrDirs = getNumStorageDirs();
-    assert( index >= 0 && index < nrDirs );
-    if( nrDirs == 1 )
+    assert(index >= 0 && index < nrDirs);
+    if (nrDirs == 1)
       throw new IOException("Checkpoint directories inaccessible.");
-    storageDirs.remove( index );
+    storageDirs.remove(index);
   }
 
   FSEditLog getEditLog() {
     return editLog;
   }
 
-  boolean isConversionNeeded( StorageDirectory sd ) throws IOException {
-    File oldImageDir = new File( sd.root, "image" );
-    if( ! oldImageDir.exists() )
+  boolean isConversionNeeded(StorageDirectory sd) throws IOException {
+    File oldImageDir = new File(sd.root, "image");
+    if (!oldImageDir.exists())
       return false;
     // check consistency of the old storage
-    if( ! oldImageDir.isDirectory() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldImageDir + " is not a directory." );
-    if( ! oldImageDir.canWrite() )
-      throw new InconsistentFSStateException( sd.root,
-                                              oldImageDir + " is not writable." );
+    if (!oldImageDir.isDirectory())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldImageDir + " is not a directory.");
+    if (!oldImageDir.canWrite())
+      throw new InconsistentFSStateException(sd.root,
+                                             oldImageDir + " is not writable.");
     return true;
   }
   
-  private boolean convertLayout( StorageDirectory sd ) throws IOException {
+  private boolean convertLayout(StorageDirectory sd) throws IOException {
     assert FSConstants.LAYOUT_VERSION < LAST_PRE_UPGRADE_LAYOUT_VERSION :
       "Bad current layout version: FSConstants.LAYOUT_VERSION should decrease";
-    File oldImageDir = new File( sd.root, "image" );
+    File oldImageDir = new File(sd.root, "image");
     assert oldImageDir.exists() : "Old image directory is missing";
-    File oldImage = new File( oldImageDir, "fsimage" );
+    File oldImage = new File(oldImageDir, "fsimage");
     
-    LOG.info( "Old layout version directory " + oldImageDir
-              + " is found. New layout version is "
-              + FSConstants.LAYOUT_VERSION );
-    LOG.info( "Trying to convert ..." );
+    LOG.info("Old layout version directory " + oldImageDir
+             + " is found. New layout version is "
+             + FSConstants.LAYOUT_VERSION);
+    LOG.info("Trying to convert ...");
 
     // we did not use locking for the pre-upgrade layout, so we cannot prevent 
     // old name-nodes from running in the same directory as the new ones
@@ -464,35 +464,35 @@ class FSImage extends Storage {
     // check new storage
     File newImageDir = sd.getCurrentDir();
     File versionF = sd.getVersionFile();
-    if( versionF.exists() )
-      throw new IOException( "Version file already exists: " + versionF );
-    if( newImageDir.exists() ) // // somebody created current dir manually
-      deleteDir( newImageDir );
+    if (versionF.exists())
+      throw new IOException("Version file already exists: " + versionF);
+    if (newImageDir.exists()) // somebody created current dir manually
+      deleteDir(newImageDir);
 
     // move old image files into new location
-    rename( oldImageDir, newImageDir );
-    File oldEdits1 = new File( sd.root, "edits" );
+    rename(oldImageDir, newImageDir);
+    File oldEdits1 = new File(sd.root, "edits");
     // move old edits into data
-    if( oldEdits1.exists() )
-      rename( oldEdits1, getImageFile( sd, NameNodeFile.EDITS ));
-    File oldEdits2 = new File( sd.root, "edits.new" );
-    if( oldEdits2.exists() )
-      rename( oldEdits2, getImageFile( sd, NameNodeFile.EDITS_NEW ));
+    if (oldEdits1.exists())
+      rename(oldEdits1, getImageFile(sd, NameNodeFile.EDITS));
+    File oldEdits2 = new File(sd.root, "edits.new");
+    if (oldEdits2.exists())
+      rename(oldEdits2, getImageFile(sd, NameNodeFile.EDITS_NEW));
 
     // Write the new layout. Setting layoutVersion = LAST_PRE_UPGRADE_LAYOUT_VERSION
     // means the actual version should be obtained from the image file.
     this.layoutVersion = LAST_PRE_UPGRADE_LAYOUT_VERSION;
-    File newImageFile = getImageFile( sd, NameNodeFile.IMAGE );
+    File newImageFile = getImageFile(sd, NameNodeFile.IMAGE);
     boolean needReformat = false;
-    if( ! newImageFile.exists() ) {
+    if (!newImageFile.exists()) {
       // in pre-upgrade versions the image file was allowed not to exist;
       // we treat it as not formatted then
-      LOG.info( "Old image file " + oldImage + " does not exist. " );
+      LOG.info("Old image file " + oldImage + " does not exist. ");
       needReformat = true;
     } else {
       sd.write();
-      LOG.info( "Conversion of " + oldImage + " is complete." );
+      LOG.info("Conversion of " + oldImage + " is complete.");
     }
     return needReformat;
   }
@@ -500,15 +500,15 @@ class FSImage extends Storage {
   //
   // Atomic move sequence, to recover from interrupted checkpoint
   //
-  void recoverInterruptedCheckpoint( StorageDirectory sd ) throws IOException {
-    File curFile = getImageFile( sd, NameNodeFile.IMAGE );
-    File ckptFile = getImageFile( sd, NameNodeFile.IMAGE_NEW );
+  void recoverInterruptedCheckpoint(StorageDirectory sd) throws IOException {
+    File curFile = getImageFile(sd, NameNodeFile.IMAGE);
+    File ckptFile = getImageFile(sd, NameNodeFile.IMAGE_NEW);
 
     //
     // If we were in the midst of a checkpoint
     //
     if (ckptFile.exists()) {
-      if (getImageFile( sd, NameNodeFile.EDITS_NEW ).exists()) {
+      if (getImageFile(sd, NameNodeFile.EDITS_NEW).exists()) {
         //
        // checkpointing might have uploaded a new
         // merged image, but we discard it here because we are
@@ -552,23 +552,23 @@ class FSImage extends Storage {
     boolean needToSave = false;
     isUpgradeFinalized = true;
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      StorageDirectory sd = getStorageDir( idx );
-      recoverInterruptedCheckpoint( sd );
-      if( ! sd.getVersionFile().exists() ) {
+      StorageDirectory sd = getStorageDir(idx);
+      recoverInterruptedCheckpoint(sd);
+      if (!sd.getVersionFile().exists()) {
         needToSave |= true;
         continue; // some of them might have just been formatted
       }
-      assert getImageFile( sd, NameNodeFile.IMAGE ).exists() :
+      assert getImageFile(sd, NameNodeFile.IMAGE).exists() :
         "Image file must exist.";
-      checkpointTime = readCheckpointTime( sd );
-      if( latestCheckpointTime < checkpointTime ) {
+      checkpointTime = readCheckpointTime(sd);
+      if (latestCheckpointTime < checkpointTime) {
         latestCheckpointTime = checkpointTime;
         latestSD = sd;
       }
-      if( checkpointTime <= 0L )
+      if (checkpointTime <= 0L)
         needToSave |= true;
       // set finalized flag
-      isUpgradeFinalized &= ! sd.getPreviousDir().exists();
+      isUpgradeFinalized &= !sd.getPreviousDir().exists();
     }
     assert latestSD != null : "Latest storage directory was not determined.";
 
@@ -576,13 +576,13 @@ class FSImage extends Storage {
     // Load in bits
     //
     latestSD.read();
-    needToSave |= loadFSImage( getImageFile( latestSD, NameNodeFile.IMAGE ));
+    needToSave |= loadFSImage(getImageFile(latestSD, NameNodeFile.IMAGE));
 
     //
     // read in the editlog from the same directory from
     // which we read in the image
     //
-    needToSave |= ( loadFSEdits( latestSD ) > 0 );
+    needToSave |= (loadFSEdits(latestSD) > 0);
 
     return needToSave;
   }
@@ -592,7 +592,7 @@ class FSImage extends Storage {
    * filenames and blocks.  Return whether we should
    * "re-save" and consolidate the edit-logs
    */
-  boolean loadFSImage( File curFile ) throws IOException {
+  boolean loadFSImage(File curFile) throws IOException {
     assert this.getLayoutVersion() < 0 : "Negative layout version is expected.";
     assert curFile != null : "curFile is null";
 
@@ -613,13 +613,13 @@ class FSImage extends Storage {
       // read image version: first appeared in version -1
       imgVersion = in.readInt();
       // read namespaceID: first appeared in version -2
-      if( imgVersion <= -2 )
+      if (imgVersion <= -2)
         this.namespaceID = in.readInt();
       // read number of files
       int numFiles = 0;
       // version 0 does not store version #
       // starts directly with the number of files
-      if( imgVersion >= 0 ) {
+      if (imgVersion >= 0) {
         numFiles = imgVersion;
         imgVersion = 0;
       } else {
@@ -627,7 +627,7 @@ class FSImage extends Storage {
       }
       this.layoutVersion = imgVersion;
 
-      needToSave = ( imgVersion != FSConstants.LAYOUT_VERSION );
+      needToSave = (imgVersion != FSConstants.LAYOUT_VERSION);
 
       // read file info
       short replication = FSNamesystem.getFSNamesystem().getDefaultReplication();
@@ -635,9 +635,9 @@ class FSImage extends Storage {
         UTF8 name = new UTF8();
         name.readFields(in);
         // version 0 does not support per file replication
-        if( !(imgVersion >= 0) ) {
+        if (!(imgVersion >= 0)) {
           replication = in.readShort(); // other versions do
-          replication = FSEditLog.adjustReplication( replication );
+          replication = FSEditLog.adjustReplication(replication);
         }
         int numBlocks = in.readInt();
         Block blocks[] = null;
@@ -648,11 +648,11 @@ class FSImage extends Storage {
             blocks[j].readFields(in);
           }
         }
-        fsDir.unprotectedAddFile(name, blocks, replication );
+        fsDir.unprotectedAddFile(name, blocks, replication);
       }
       
       // load datanode info
-      this.loadDatanodes( imgVersion, in );
+      this.loadDatanodes(imgVersion, in);
     } finally {
       in.close();
     }
@@ -667,19 +667,19 @@ class FSImage extends Storage {
    * @return number of edits loaded
    * @throws IOException
    */
-  int loadFSEdits( StorageDirectory sd ) throws IOException {
+  int loadFSEdits(StorageDirectory sd) throws IOException {
     int numEdits = 0;
-    numEdits = editLog.loadFSEdits( getImageFile( sd, NameNodeFile.EDITS ));
-    File editsNew = getImageFile( sd, NameNodeFile.EDITS_NEW );
-    if( editsNew.exists() ) 
-      numEdits += editLog.loadFSEdits( editsNew );
+    numEdits = editLog.loadFSEdits(getImageFile(sd, NameNodeFile.EDITS));
+    File editsNew = getImageFile(sd, NameNodeFile.EDITS_NEW);
+    if (editsNew.exists()) 
+      numEdits += editLog.loadFSEdits(editsNew);
     return numEdits;
   }
 
   /**
    * Save the contents of the FS image to the file.
    */
-  void saveFSImage( File newFile  ) throws IOException {
+  void saveFSImage(File newFile) throws IOException {
     FSDirectory fsDir = FSNamesystem.getFSNamesystem().dir;
     //
     // Write out data
@@ -691,8 +691,8 @@ class FSImage extends Storage {
       out.writeInt(FSConstants.LAYOUT_VERSION);
       out.writeInt(namespaceID);
       out.writeInt(fsDir.rootDir.numItemsInTree() - 1);
-      saveImage( "", fsDir.rootDir, out );
-      saveDatanodes( out );
+      saveImage("", fsDir.rootDir, out);
+      saveDatanodes(out);
     } finally {
       out.close();
     }
@@ -704,9 +704,9 @@ class FSImage extends Storage {
   void saveFSImage() throws IOException {
     editLog.createNewIfMissing();
     for (int idx = 0; idx < getNumStorageDirs(); idx++) {
-      StorageDirectory sd = getStorageDir( idx );
-      saveFSImage( getImageFile( sd, NameNodeFile.IMAGE_NEW ));
-      editLog.createEditLogFile( getImageFile( sd, NameNodeFile.EDITS ));
+      StorageDirectory sd = getStorageDir(idx);
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE_NEW));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
     }
     rollFSImage();
   }
@@ -725,27 +725,27 @@ class FSImage extends Storage {
    */
   private int newNamespaceID() {
     Random r = new Random();
-    r.setSeed( FSNamesystem.now() );
+    r.setSeed(FSNamesystem.now());
     int newID = 0;
-    while( newID == 0)
-      newID = r.nextInt( 0x7FFFFFFF );  // use 31 bits only
+    while(newID == 0)
+      newID = r.nextInt(0x7FFFFFFF);  // use 31 bits only
     return newID;
   }
 
   /** Create new dfs name directory.  Caution: this destroys all files
    * in this filesystem. */
-  void format( StorageDirectory sd ) throws IOException {
+  void format(StorageDirectory sd) throws IOException {
    sd.clearDirectory(); // create current dir
     sd.lock();
     try {
-      saveFSImage( getImageFile( sd, NameNodeFile.IMAGE ));
-      editLog.createEditLogFile( getImageFile( sd, NameNodeFile.EDITS ));
+      saveFSImage(getImageFile(sd, NameNodeFile.IMAGE));
+      editLog.createEditLogFile(getImageFile(sd, NameNodeFile.EDITS));
       sd.write();
     } finally {
       sd.unlock();
     }
-    LOG.info( "Storage directory " + sd.root 
-              + " has been successfully formatted." );
+    LOG.info("Storage directory " + sd.root 
+             + " has been successfully formatted.");
   }
 
   public void format() throws IOException {
@@ -753,9 +753,9 @@ class FSImage extends Storage {
     this.namespaceID = newNamespaceID();
     this.cTime = 0L;
     this.checkpointTime = FSNamesystem.now();
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      format( sd );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      format(sd);
     }
   }
 
@@ -764,24 +764,24 @@ class FSImage extends Storage {
    */
   private static void saveImage(String parentPrefix, 
                                 FSDirectory.INode root, 
-                                DataOutputStream out ) throws IOException {
+                                DataOutputStream out) throws IOException {
     String fullName = "";
-    if( root.getParent() != null) {
+    if (root.getParent() != null) {
       fullName = parentPrefix + "/" + root.getLocalName();
       new UTF8(fullName).write(out);
-      out.writeShort( root.getReplication() );
-      if( root.isDir() ) {
+      out.writeShort(root.getReplication());
+      if (root.isDir()) {
         out.writeInt(0);
       } else {
         int nrBlocks = root.getBlocks().length;
-        out.writeInt( nrBlocks );
+        out.writeInt(nrBlocks);
         for (int i = 0; i < nrBlocks; i++)
           root.getBlocks()[i].write(out);
       }
     }
     for(Iterator<INode> it = root.getChildIterator(); it != null &&
-          it.hasNext(); ) {
-      saveImage( fullName, it.next(), out );
+          it.hasNext();) {
+      saveImage(fullName, it.next(), out);
     }
   }
 
@@ -793,22 +793,22 @@ class FSImage extends Storage {
    * @param out output stream
    * @throws IOException
    */
-  void saveDatanodes( DataOutputStream out ) throws IOException {
+  void saveDatanodes(DataOutputStream out) throws IOException {
     Map datanodeMap = FSNamesystem.getFSNamesystem().datanodeMap;
     int size = datanodeMap.size();
-    out.writeInt( size );
-    for( Iterator it = datanodeMap.values().iterator(); it.hasNext(); ) {
+    out.writeInt(size);
+    for(Iterator it = datanodeMap.values().iterator(); it.hasNext();) {
       DatanodeImage nodeImage = new DatanodeImage((DatanodeDescriptor) it.next());
-      nodeImage.write( out );
+      nodeImage.write(out);
     }
   }
 
-  void loadDatanodes( int version, DataInputStream in ) throws IOException {
-    if( version > -3 ) // pre datanode image version
+  void loadDatanodes(int version, DataInputStream in) throws IOException {
+    if (version > -3) // pre datanode image version
       return;
     FSNamesystem fsNamesys = FSNamesystem.getFSNamesystem();
     int size = in.readInt();
-    for( int i = 0; i < size; i++ ) {
+    for(int i = 0; i < size; i++) {
       DatanodeImage nodeImage = new DatanodeImage();
       nodeImage.readFields(in);
       fsNamesys.unprotectedAddDatanode(nodeImage.getDatanodeDescriptor());
@@ -827,9 +827,9 @@ class FSImage extends Storage {
     if (!editLog.existsNew()) {
       throw new IOException("New Edits file does not exist");
     }
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      File ckpt = getImageFile( sd, NameNodeFile.IMAGE_NEW );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      File ckpt = getImageFile(sd, NameNodeFile.IMAGE_NEW);
       if (!ckpt.exists()) {
         throw new IOException("Checkpoint file " + ckpt +
                               " does not exist");
@@ -840,10 +840,10 @@ class FSImage extends Storage {
     //
     // Renames new image
     //
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
-      File ckpt = getImageFile( sd, NameNodeFile.IMAGE_NEW );
-      File curFile = getImageFile( sd, NameNodeFile.IMAGE );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
+      File ckpt = getImageFile(sd, NameNodeFile.IMAGE_NEW);
+      File curFile = getImageFile(sd, NameNodeFile.IMAGE);
       // renameTo fails on Windows if the destination file 
       // already exists.
       if (!ckpt.renameTo(curFile)) {
@@ -860,12 +860,12 @@ class FSImage extends Storage {
     //
     this.layoutVersion = FSConstants.LAYOUT_VERSION;
     this.checkpointTime = FSNamesystem.now();
-    for( int idx = 0; idx < getNumStorageDirs(); idx++ ) {
-      StorageDirectory sd = getStorageDir( idx );
+    for(int idx = 0; idx < getNumStorageDirs(); idx++) {
+      StorageDirectory sd = getStorageDir(idx);
       try {
         sd.write();
       } catch (IOException e) {
-        LOG.error( "Cannot write file " + sd.root, e );
+        LOG.error("Cannot write file " + sd.root, e);
         editLog.processIOError(idx);
         idx--;
       }
@@ -881,7 +881,7 @@ class FSImage extends Storage {
    * Return the name of the image file.
    */
   File getFsImageName() {
-    return getImageFile( 0, NameNodeFile.IMAGE );
+    return getImageFile(0, NameNodeFile.IMAGE);
   }
 
   /**
@@ -890,8 +890,8 @@ class FSImage extends Storage {
    */
   File[] getFsImageNameCheckpoint() {
     File[] list = new File[getNumStorageDirs()];
-    for( int i = 0; i < getNumStorageDirs(); i++ ) {
-      list[i] = getImageFile( getStorageDir( i ), NameNodeFile.IMAGE_NEW );
+    for(int i = 0; i < getNumStorageDirs(); i++) {
+      list[i] = getImageFile(getStorageDir(i), NameNodeFile.IMAGE_NEW);
     }
     return list;
   }
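
Aside from the brace and spacing changes, the rollback hunk above is worth
reading for its crash-safe ordering: "current" is first set aside as
"removed.tmp", "previous" is renamed into place, and only then is the
set-aside copy deleted, so an interruption leaves at least one complete
state on disk. A minimal standalone sketch of that pattern (hypothetical
class, helper, and directory names; this is not the FSImage code itself):

    import java.io.File;
    import java.io.IOException;

    public class RollbackSketch {
      static void rename(File from, File to) throws IOException {
        if (!from.renameTo(to))
          throw new IOException("Cannot rename " + from + " to " + to);
      }

      static void deleteDir(File dir) throws IOException {
        File[] children = dir.listFiles();      // null for plain files
        if (children != null)
          for (File c : children) deleteDir(c);
        if (!dir.delete())
          throw new IOException("Cannot delete " + dir);
      }

      /** Roll back one storage root: previous/ replaces current/. */
      static void rollbackOne(File root) throws IOException {
        File current = new File(root, "current");
        File previous = new File(root, "previous");
        File removedTmp = new File(root, "removed.tmp");
        if (!previous.exists())
          return;                      // nothing to roll back
        rename(current, removedTmp);   // 1. set current aside
        rename(previous, current);     // 2. previous becomes current
        deleteDir(removedTmp);         // 3. discard the old state
      }

      public static void main(String[] args) throws IOException {
        rollbackOne(new File(args[0]));
      }
    }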

File diff suppressed because it is too large
+ 232 - 230
src/java/org/apache/hadoop/dfs/FSNamesystem.java


+ 7 - 7
src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java

@@ -29,19 +29,19 @@ import org.apache.hadoop.util.StringUtils;
  */
 class InconsistentFSStateException extends IOException {
 
-  public InconsistentFSStateException( File dir, String descr ) {
-    super( "Directory " + getFilePath( dir )
-           + " is in an inconsistent state: " + descr );
+  public InconsistentFSStateException(File dir, String descr) {
+    super("Directory " + getFilePath(dir)
+          + " is in an inconsistent state: " + descr);
   }
 
-  public InconsistentFSStateException( File dir, String descr, Throwable ex ) {
-    this( dir, descr + "\n" + StringUtils.stringifyException(ex) );
+  public InconsistentFSStateException(File dir, String descr, Throwable ex) {
+    this(dir, descr + "\n" + StringUtils.stringifyException(ex));
   }
   
-  private static String getFilePath( File dir ) {
+  private static String getFilePath(File dir) {
     try {
       return dir.getCanonicalPath();
-    } catch( IOException e ) {}
+    } catch(IOException e) {}
     return dir.getPath();
   }
 }
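
For context on how the message above is assembled, a made-up call site (the
helper name and check are hypothetical; the sketch assumes it sits in
org.apache.hadoop.dfs, since the exception class is package-private):

    package org.apache.hadoop.dfs;

    import java.io.File;
    import java.io.IOException;

    class StorageCheckSketch {
      // Fail with a descriptive, path-qualified message when a storage
      // directory lacks its version file.
      static void checkVersionFile(File dir, File versionFile)
          throws IOException {
        if (!versionFile.exists())
          throw new InconsistentFSStateException(dir, "version file is missing.");
      }
    }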

+ 8 - 8
src/java/org/apache/hadoop/dfs/IncorrectVersionException.java

@@ -27,16 +27,16 @@ import java.io.IOException;
  */
 class IncorrectVersionException extends IOException {
 
-  public IncorrectVersionException( int versionReported, String ofWhat ) {
-    this( versionReported, ofWhat, FSConstants.LAYOUT_VERSION );
+  public IncorrectVersionException(int versionReported, String ofWhat) {
+    this(versionReported, ofWhat, FSConstants.LAYOUT_VERSION);
   }
   
-  public IncorrectVersionException( int versionReported,
-                                    String ofWhat,
-                                    int versionExpected ) {
-    super( "Unexpected version " 
-           + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
-           + versionReported + ". Expecting = " + versionExpected + "." );
+  public IncorrectVersionException(int versionReported,
+                                   String ofWhat,
+                                   int versionExpected) {
+    super("Unexpected version " 
+          + (ofWhat==null ? "" : "of " + ofWhat) + ". Reported: "
+          + versionReported + ". Expecting = " + versionExpected + ".");
   }
 
 }
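
This exception backs the layout-version handshake that appears later in the
NameNode diff (verifyVersion). A self-contained sketch of that check, with a
plain IOException standing in for the package-private exception and a
made-up version constant:

    import java.io.IOException;

    public class VersionCheckSketch {
      static final int LAYOUT_VERSION = -4;   // hypothetical current version

      static void verifyVersion(int reported) throws IOException {
        if (reported != LAYOUT_VERSION)
          throw new IOException("Unexpected version of data node. Reported: "
              + reported + ". Expecting = " + LAYOUT_VERSION + ".");
      }

      public static void main(String[] args) throws IOException {
        verifyVersion(-4);   // passes silently
        verifyVersion(-3);   // throws
      }
    }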

+ 18 - 18
src/java/org/apache/hadoop/dfs/JspHelper.java

@@ -34,7 +34,7 @@ public class JspHelper {
   static Configuration conf = new Configuration();
 
   static int defaultChunkSizeToView = 
-    conf.getInt("dfs.default.chunk.view.size",32 * 1024);
+    conf.getInt("dfs.default.chunk.view.size", 32 * 1024);
   static Random rand = new Random();
 
   public JspHelper() {
@@ -140,9 +140,9 @@ public class JspHelper {
     in.close();
     out.print(new String(buf));
   }
-  public void DFSNodesStatus( ArrayList<DatanodeDescriptor> live,
-                              ArrayList<DatanodeDescriptor> dead ) {
-    if ( fsn != null )
+  public void DFSNodesStatus(ArrayList<DatanodeDescriptor> live,
+                             ArrayList<DatanodeDescriptor> dead) {
+    if (fsn != null)
       fsn.DFSNodesStatus(live, dead);
   }
   public void addTableHeader(JspWriter out) throws IOException {
@@ -161,7 +161,7 @@ public class JspHelper {
     out.print("<tr>");
       
     for (int i = 0; i < columns.length; i++) {
-      if( row/2*2 == row ) {//even
+      if (row/2*2 == row) {//even
         out.print("<td style=\"vertical-align: top;background-color:LightGrey;\"><B>"+columns[i]+"</B><br></td>");
       } else {
         out.print("<td style=\"vertical-align: top;background-color:LightBlue;\"><B>"+columns[i]+"</B><br></td>");
@@ -175,7 +175,7 @@ public class JspHelper {
   }
 
   public String getSafeModeText() {
-    if( ! fsn.isInSafeMode() )
+    if (!fsn.isInSafeMode())
       return "";
     return "Safe mode is ON. <em>" + fsn.getSafeModeTip() + "</em><br>";
   }
@@ -197,29 +197,29 @@ public class JspHelper {
       int sortOrder = SORT_ORDER_ASC;
             
       public NodeComapare(String field, String order) {
-        if ( field.equals( "lastcontact" ) ) {
+        if (field.equals("lastcontact")) {
           sortField = FIELD_LAST_CONTACT;
-        } else if ( field.equals( "size" ) ) {
+        } else if (field.equals("size")) {
           sortField = FIELD_SIZE;
-        } else if ( field.equals( "blocks" ) ) {
+        } else if (field.equals("blocks")) {
           sortField = FIELD_BLOCKS;
-        } else if ( field.equals( "pcused" ) ) {
+        } else if (field.equals("pcused")) {
           sortField = FIELD_DISK_USED;
         } else {
           sortField = FIELD_NAME;
         }
                 
-        if ( order.equals("DSC") ) {
+        if (order.equals("DSC")) {
           sortOrder = SORT_ORDER_DSC;
         } else {
           sortOrder = SORT_ORDER_ASC;
         }
       }
 
-      public int compare( DatanodeDescriptor d1,
-                          DatanodeDescriptor d2 ) {
+      public int compare(DatanodeDescriptor d1,
+                         DatanodeDescriptor d2) {
         int ret = 0;
-        switch ( sortField ) {
+        switch (sortField) {
         case FIELD_LAST_CONTACT:
           ret = (int) (d2.getLastUpdate() - d1.getLastUpdate());
           break;
@@ -228,21 +228,21 @@ public class JspHelper {
           break;
         case FIELD_SIZE:
           long  dlong = d1.getCapacity() - d2.getCapacity();
-          ret = (dlong < 0) ? -1 : ( (dlong > 0) ? 1 : 0 );
+          ret = (dlong < 0) ? -1 : ((dlong > 0) ? 1 : 0);
           break;
         case FIELD_DISK_USED:
          double ddbl = ((d2.getRemaining()*1.0/d2.getCapacity())-
                         (d1.getRemaining()*1.0/d1.getCapacity()));
-          ret = (ddbl < 0) ? -1 : ( (ddbl > 0) ? 1 : 0 );
+          ret = (ddbl < 0) ? -1 : ((ddbl > 0) ? 1 : 0);
           break;
         case FIELD_NAME: 
           ret = d1.getHostName().compareTo(d2.getHostName());
           break;
         }
-        return ( sortOrder == SORT_ORDER_DSC ) ? -ret : ret;
+        return (sortOrder == SORT_ORDER_DSC) ? -ret : ret;
       }
     }
         
-    Collections.sort( nodes, new NodeComapare( field, order ) );
+    Collections.sort(nodes, new NodeComapare(field, order));
   }
 }
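
The NodeComapare hunks above build a comparator out of a sort-field and
sort-order string taken from the request. A compact standalone sketch of the
same idea (hypothetical Node type, not the Hadoop classes; Long.compare is
used to sidestep the overflow risk of subtract-and-cast comparisons):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Comparator;
    import java.util.List;

    public class NodeSortSketch {
      static class Node {
        final String host;
        final long capacity;
        Node(String host, long capacity) { this.host = host; this.capacity = capacity; }
      }

      static Comparator<Node> comparator(String field, String order) {
        Comparator<Node> c = "size".equals(field)
            ? (a, b) -> Long.compare(a.capacity, b.capacity)   // by capacity
            : (a, b) -> a.host.compareTo(b.host);              // by name
        return "DSC".equals(order) ? c.reversed() : c;
      }

      public static void main(String[] args) {
        List<Node> nodes = new ArrayList<>(Arrays.asList(
            new Node("dn1", 500L), new Node("dn2", 200L), new Node("dn3", 800L)));
        nodes.sort(comparator("size", "DSC"));
        for (Node n : nodes)
          System.out.println(n.host + " " + n.capacity);
      }
    }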

+ 86 - 86
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -81,7 +81,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   }
     
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
-  public static final Log stateChangeLog = LogFactory.getLog( "org.apache.hadoop.dfs.StateChange");
+  public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.dfs.StateChange");
 
   private FSNamesystem namesystem;
   private Server server;
@@ -96,7 +96,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /** Format a new filesystem.  Destroys any filesystem that may already
    * exist at this location.  **/
   public static void format(Configuration conf) throws IOException {
-    format( conf, false );
+    format(conf, false);
   }
 
   private class NameNodeMetrics implements Updater {
@@ -208,7 +208,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   public NameNode(Configuration conf) throws IOException {
     InetSocketAddress addr = 
       DataNode.createSocketAddr(conf.get("fs.default.name"));
-    init( addr.getHostName(), addr.getPort(), conf );
+    init(addr.getHostName(), addr.getPort(), conf);
   }
 
   /**
@@ -221,7 +221,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   public NameNode(String bindAddress, int port, 
                   Configuration conf
                   ) throws IOException {
-    init( bindAddress, port, conf );
+    init(bindAddress, port, conf);
   }
 
   /**
@@ -239,7 +239,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * Stop all NameNode threads and wait for all to finish.
    */
   public void stop() {
-    if (! stopRequested) {
+    if (!stopRequested) {
       stopRequested = true;
       namesystem.close();
       emptier.interrupt();
@@ -255,7 +255,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public LocatedBlock[] open(String src) throws IOException {
     String clientMachine = Server.getRemoteAddress();
-    if ( clientMachine == null ) {
+    if (clientMachine == null) {
       clientMachine = "";
     }
     Object openResults[] = namesystem.open(clientMachine, new UTF8(src));
@@ -282,7 +282,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
                              long blockSize
                              ) throws IOException {
     String clientMachine = Server.getRemoteAddress();
-    if ( clientMachine == null ) {
+    if (clientMachine == null) {
       clientMachine = "";
     }
     stateChangeLog.debug("*DIR* NameNode.create: file "
@@ -303,10 +303,10 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     return new LocatedBlock(b, targets);
   }
 
-  public boolean setReplication( String src, 
-                                 short replication
-                                 ) throws IOException {
-    return namesystem.setReplication( src, replication );
+  public boolean setReplication(String src, 
+                                short replication
+                                ) throws IOException {
+    return namesystem.setReplication(src, replication);
   }
     
   /**
@@ -328,8 +328,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public void abandonBlock(Block b, String src) throws IOException {
     stateChangeLog.debug("*BLOCK* NameNode.abandonBlock: "
-                         +b.getBlockName()+" of file "+src );
-    if (! namesystem.abandonBlock(b, new UTF8(src))) {
+                         +b.getBlockName()+" of file "+src);
+    if (!namesystem.abandonBlock(b, new UTF8(src))) {
       throw new IOException("Cannot abandon block during write to " + src);
     }
   }
@@ -337,13 +337,13 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public void abandonFileInProgress(String src, 
                                     String holder) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src );
+    stateChangeLog.debug("*DIR* NameNode.abandonFileInProgress:" + src);
     namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
   }
   /**
    */
   public boolean complete(String src, String clientName) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName );
+    stateChangeLog.debug("*DIR* NameNode.complete: " + src + " for " + clientName);
     int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
     if (returnCode == STILL_WAITING) {
       return false;
@@ -375,7 +375,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public String[][] getHints(String src, long start, long len) throws IOException {
-    return namesystem.getDatanodeHints( src, start, len );
+    return namesystem.getDatanodeHints(src, start, len);
   }
     
   public long getBlockSize(String filename) throws IOException {
@@ -385,7 +385,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public boolean rename(String src, String dst) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst );
+    stateChangeLog.debug("*DIR* NameNode.rename: " + src + " to " + dst);
     if (!checkPathLength(dst)) {
       throw new IOException("rename: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
@@ -400,7 +400,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public boolean delete(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.delete: " + src );
+    stateChangeLog.debug("*DIR* NameNode.delete: " + src);
     return namesystem.delete(new UTF8(src));
   }
 
@@ -431,12 +431,12 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    */
   public boolean mkdirs(String src) throws IOException {
-    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src );
+    stateChangeLog.debug("*DIR* NameNode.mkdirs: " + src);
     if (!checkPathLength(src)) {
       throw new IOException("mkdirs: Pathname too long.  Limit " 
                             + MAX_PATH_LENGTH + " characters, " + MAX_PATH_DEPTH + " levels.");
     }
-    return namesystem.mkdirs( src );
+    return namesystem.mkdirs(src);
   }
 
   /** @deprecated */ @Deprecated
@@ -502,8 +502,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   /**
    * @inheritDoc
    */
-  public boolean setSafeMode( SafeModeAction action ) throws IOException {
-    switch( action ) {
+  public boolean setSafeMode(SafeModeAction action) throws IOException {
+    switch(action) {
     case SAFEMODE_LEAVE: // leave safe mode
       namesystem.leaveSafeMode();
       break;
@@ -567,11 +567,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   ////////////////////////////////////////////////////////////////
   /** 
    */
-  public DatanodeRegistration register( DatanodeRegistration nodeReg,
-                                        String networkLocation
-                                        ) throws IOException {
-    verifyVersion( nodeReg.getVersion() );
-    namesystem.registerDatanode( nodeReg, networkLocation );
+  public DatanodeRegistration register(DatanodeRegistration nodeReg,
+                                       String networkLocation
+                                       ) throws IOException {
+    verifyVersion(nodeReg.getVersion());
+    namesystem.registerDatanode(nodeReg, networkLocation);
       
     return nodeReg;
   }
@@ -581,25 +581,25 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * Return a block-oriented command for the datanode to execute.
    * This will be either a transfer or a delete operation.
    */
-  public DatanodeCommand sendHeartbeat( DatanodeRegistration nodeReg,
-                                        long capacity, 
-                                        long remaining,
-                                        int xmitsInProgress,
-                                        int xceiverCount) throws IOException {
+  public DatanodeCommand sendHeartbeat(DatanodeRegistration nodeReg,
+                                       long capacity, 
+                                       long remaining,
+                                       int xmitsInProgress,
+                                       int xceiverCount) throws IOException {
     Object xferResults[] = new Object[2];
     xferResults[0] = xferResults[1] = null;
     Object deleteList[] = new Object[1];
     deleteList[0] = null; 
 
-    verifyRequest( nodeReg );
-    if( namesystem.gotHeartbeat( nodeReg, capacity, remaining, 
-                                 xceiverCount, 
-                                 xmitsInProgress,
-                                 xferResults,
-                                 deleteList)) {
+    verifyRequest(nodeReg);
+    if (namesystem.gotHeartbeat(nodeReg, capacity, remaining, 
+                                xceiverCount, 
+                                xmitsInProgress,
+                                xferResults,
+                                deleteList)) {
       // request block report from the datanode
       assert(xferResults[0] == null && deleteList[0] == null);
-      return new DatanodeCommand( DataNodeAction.DNA_REGISTER );
+      return new DatanodeCommand(DataNodeAction.DNA_REGISTER);
     }
         
     //
@@ -622,27 +622,27 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     return null;
   }
 
-  public DatanodeCommand blockReport( DatanodeRegistration nodeReg,
-                                      Block blocks[]) throws IOException {
-    verifyRequest( nodeReg );
+  public DatanodeCommand blockReport(DatanodeRegistration nodeReg,
+                                     Block blocks[]) throws IOException {
+    verifyRequest(nodeReg);
     stateChangeLog.debug("*BLOCK* NameNode.blockReport: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks" );
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks");
 
-    Block blocksToDelete[] = namesystem.processReport( nodeReg, blocks );
-    if( blocksToDelete != null && blocksToDelete.length > 0 )
-      return new BlockCommand( blocksToDelete );
-    if( getFSImage().isUpgradeFinalized() )
-      return new DatanodeCommand( DataNodeAction.DNA_FINALIZE );
+    Block blocksToDelete[] = namesystem.processReport(nodeReg, blocks);
+    if (blocksToDelete != null && blocksToDelete.length > 0)
+      return new BlockCommand(blocksToDelete);
+    if (getFSImage().isUpgradeFinalized())
+      return new DatanodeCommand(DataNodeAction.DNA_FINALIZE);
     return null;
   }
 
   public void blockReceived(DatanodeRegistration nodeReg, 
                             Block blocks[]) throws IOException {
-    verifyRequest( nodeReg );
+    verifyRequest(nodeReg);
     stateChangeLog.debug("*BLOCK* NameNode.blockReceived: "
-                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks." );
+                         +"from "+nodeReg.getName()+" "+blocks.length+" blocks.");
     for (int i = 0; i < blocks.length; i++) {
-      namesystem.blockReceived( nodeReg, blocks[i] );
+      namesystem.blockReceived(nodeReg, blocks[i]);
     }
   }
 
@@ -653,12 +653,12 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
                           String msg) throws IOException {
     // Log error message from datanode
     LOG.info("Report from " + nodeReg.getName() + ": " + msg);
-    if( errorCode == DatanodeProtocol.NOTIFY ) {
+    if (errorCode == DatanodeProtocol.NOTIFY) {
       return;
     }
-    verifyRequest( nodeReg );
-    if( errorCode == DatanodeProtocol.DISK_ERROR ) {
-      namesystem.removeDatanode( nodeReg );            
+    verifyRequest(nodeReg);
+    if (errorCode == DatanodeProtocol.DISK_ERROR) {
+      namesystem.removeDatanode(nodeReg);            
     }
   }
     
@@ -675,10 +675,10 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * @param nodeReg data node registration
    * @throws IOException
    */
-  public void verifyRequest( DatanodeRegistration nodeReg ) throws IOException {
-    verifyVersion( nodeReg.getVersion() );
-    if( ! namesystem.getRegistrationID().equals( nodeReg.getRegistrationID() ))
-      throw new UnregisteredDatanodeException( nodeReg );
+  public void verifyRequest(DatanodeRegistration nodeReg) throws IOException {
+    verifyVersion(nodeReg.getVersion());
+    if (!namesystem.getRegistrationID().equals(nodeReg.getRegistrationID()))
+      throw new UnregisteredDatanodeException(nodeReg);
   }
     
   /**
@@ -687,9 +687,9 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    * @param version
    * @throws IOException
    */
-  public void verifyVersion( int version ) throws IOException {
-    if( version != LAYOUT_VERSION )
-      throw new IncorrectVersionException( version, "data node" );
+  public void verifyVersion(int version) throws IOException {
+    if (version != LAYOUT_VERSION)
+      throw new IncorrectVersionException(version, "data node");
   }
 
   /**
@@ -739,22 +739,22 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   private static boolean format(Configuration conf,
                                 boolean isConfirmationNeeded
                                 ) throws IOException {
-    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs( conf );
-    for( Iterator<File> it = dirsToFormat.iterator(); it.hasNext(); ) {
+    Collection<File> dirsToFormat = FSNamesystem.getNamespaceDirs(conf);
+    for(Iterator<File> it = dirsToFormat.iterator(); it.hasNext();) {
       File curDir = it.next();
-      if( ! curDir.exists() )
+      if (!curDir.exists())
         continue;
-      if( isConfirmationNeeded ) {
+      if (isConfirmationNeeded) {
         System.err.print("Re-format filesystem in " + curDir +" ? (Y or N) ");
         if (!(System.in.read() == 'Y')) {
           System.err.println("Format aborted in "+ curDir);
           return true;
         }
-        while( System.in.read() != '\n' ); // discard the enter-key
+        while(System.in.read() != '\n'); // discard the enter-key
       }
     }
 
-    FSNamesystem nsys = new FSNamesystem(new FSImage( dirsToFormat ));
+    FSNamesystem nsys = new FSNamesystem(new FSImage(dirsToFormat));
     nsys.dir.fsImage.format();
     return false;
   }
@@ -765,38 +765,38 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   }
 
   private static StartupOption parseArguments(String args[], 
-                                              Configuration conf ) {
+                                              Configuration conf) {
     int argsLen = (args == null) ? 0 : args.length;
     StartupOption startOpt = StartupOption.REGULAR;
-    for( int i=0; i < argsLen; i++ ) {
+    for(int i=0; i < argsLen; i++) {
       String cmd = args[i];
-      if( "-format".equalsIgnoreCase(cmd) ) {
+      if ("-format".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.FORMAT;
-      } else if( "-regular".equalsIgnoreCase(cmd) ) {
+      } else if ("-regular".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.REGULAR;
-      } else if( "-upgrade".equalsIgnoreCase(cmd) ) {
+      } else if ("-upgrade".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.UPGRADE;
-      } else if( "-rollback".equalsIgnoreCase(cmd) ) {
+      } else if ("-rollback".equalsIgnoreCase(cmd)) {
         startOpt = StartupOption.ROLLBACK;
       } else
         return null;
     }
-    conf.setObject( "dfs.namenode.startup", startOpt );
+    conf.setObject("dfs.namenode.startup", startOpt);
     return startOpt;
   }
 
-  static NameNode createNameNode( String argv[], 
-                                  Configuration conf ) throws IOException {
-    if( conf == null )
+  static NameNode createNameNode(String argv[], 
+                                 Configuration conf) throws IOException {
+    if (conf == null)
       conf = new Configuration();
-    StartupOption startOpt = parseArguments( argv, conf );
-    if( startOpt == null ) {
+    StartupOption startOpt = parseArguments(argv, conf);
+    if (startOpt == null) {
       printUsage();
       return null;
     }
       
-    if( startOpt == StartupOption.FORMAT ) {
-      boolean aborted = format( conf, true );
+    if (startOpt == StartupOption.FORMAT) {
+      boolean aborted = format(conf, true);
       System.exit(aborted ? 1 : 0);
     }
       
@@ -808,11 +808,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
    */
   public static void main(String argv[]) throws Exception {
     try {
-      NameNode namenode = createNameNode( argv, null );
-      if( namenode != null )
+      NameNode namenode = createNameNode(argv, null);
+      if (namenode != null)
         namenode.join();
-    } catch ( Throwable e ) {
-      LOG.error( StringUtils.stringifyException( e ) );
+    } catch (Throwable e) {
+      LOG.error(StringUtils.stringifyException(e));
       System.exit(-1);
     }
   }
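
The parseArguments hunk above maps command-line flags onto a startup option,
defaulting to REGULAR and rejecting unknown flags so the caller can print
usage. A runnable sketch of that dispatch (the enum and flag names mirror
the diff; nothing else here is NameNode code):

    public class StartupArgsSketch {
      enum StartupOption { FORMAT, REGULAR, UPGRADE, ROLLBACK }

      static StartupOption parse(String[] args) {
        StartupOption opt = StartupOption.REGULAR;        // default mode
        for (String cmd : args == null ? new String[0] : args) {
          if ("-format".equalsIgnoreCase(cmd))        opt = StartupOption.FORMAT;
          else if ("-regular".equalsIgnoreCase(cmd))  opt = StartupOption.REGULAR;
          else if ("-upgrade".equalsIgnoreCase(cmd))  opt = StartupOption.UPGRADE;
          else if ("-rollback".equalsIgnoreCase(cmd)) opt = StartupOption.ROLLBACK;
          else return null;                             // unknown flag
        }
        return opt;
      }

      public static void main(String[] args) {
        System.out.println(parse(args));   // e.g. "-upgrade" -> UPGRADE
      }
    }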

+ 24 - 24
src/java/org/apache/hadoop/dfs/NamenodeFsck.java

@@ -95,9 +95,9 @@ public class NamenodeFsck {
    * @throws IOException
    */
   public NamenodeFsck(Configuration conf,
-      NameNode nn,
-      Map<String,String[]> pmap,
-      HttpServletResponse response) throws IOException {
+                      NameNode nn,
+                      Map<String,String[]> pmap,
+                      HttpServletResponse response) throws IOException {
     this.conf = conf;
     this.nn = nn;
     this.out = response.getWriter();
@@ -215,13 +215,13 @@ public class NamenodeFsck {
       }
       res.corruptFiles++;
       switch(fixing) {
-        case FIXING_NONE:
-          break;
-        case FIXING_MOVE:
-          lostFoundMove(file, blocks);
-          break;
-        case FIXING_DELETE:
-          nn.delete(file.getPath());
+      case FIXING_NONE:
+        break;
+      case FIXING_MOVE:
+        lostFoundMove(file, blocks);
+        break;
+      case FIXING_DELETE:
+        nn.delete(file.getPath());
       }
     }
     if (showFiles) {
@@ -237,9 +237,9 @@ public class NamenodeFsck {
   }
   
   private void lostFoundMove(DFSFileInfo file, LocatedBlock[] blocks)
-  throws IOException {
+    throws IOException {
     DFSClient dfs = new DFSClient(DataNode.createSocketAddr(
-        conf.get("fs.default.name", "local")), conf);
+                                                            conf.get("fs.default.name", "local")), conf);
     if (!lfInited) {
       lostFoundInit(dfs);
     }
@@ -304,8 +304,8 @@ public class NamenodeFsck {
    * bad. Both places should be refactored to provide a method to copy blocks
    * around.
    */
-      private void copyBlock(DFSClient dfs, LocatedBlock lblock,
-          OutputStream fos) throws Exception {
+  private void copyBlock(DFSClient dfs, LocatedBlock lblock,
+                         OutputStream fos) throws Exception {
     int failures = 0;
     InetSocketAddress targetAddr = null;
     TreeSet<DatanodeInfo> deadNodes = new TreeSet<DatanodeInfo>();
@@ -398,11 +398,11 @@ public class NamenodeFsck {
    * Pick the best node from which to stream the data.
    * That's the local one, if available.
    */
-      Random r = new Random();
+  Random r = new Random();
   private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
-      TreeSet<DatanodeInfo> deadNodes) throws IOException {
+                                TreeSet<DatanodeInfo> deadNodes) throws IOException {
     if ((nodes == null) ||
-            (nodes.length - deadNodes.size() < 1)) {
+        (nodes.length - deadNodes.size() < 1)) {
       throw new IOException("No live nodes contain current block");
     }
     DatanodeInfo chosenNode = null;
@@ -433,12 +433,12 @@ public class NamenodeFsck {
         lfInitedOk = dfs.mkdirs(lfName);
         lostFound = lfName;
      } else if (!dfs.isDirectory(lfName)) {
-          LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
-          lfInitedOk = false;
-        }  else { // exists and isDirectory
-          lostFound = lfName;
-          lfInitedOk = true;
-        }
+        LOG.warn("Cannot use /lost+found : a regular file with this name exists.");
+        lfInitedOk = false;
+      }  else { // exists and isDirectory
+        lostFound = lfName;
+        lfInitedOk = true;
+      }
     }  catch (Exception e) {
       e.printStackTrace();
       lfInitedOk = false;
@@ -584,7 +584,7 @@ public class NamenodeFsck {
       res.append("\n Total size:\t" + totalSize + " B");
       res.append("\n Total blocks:\t" + totalBlocks);
       if (totalBlocks > 0) res.append(" (avg. block size "
-          + (totalSize / totalBlocks) + " B)");
+                                      + (totalSize / totalBlocks) + " B)");
       res.append("\n Total dirs:\t" + totalDirs);
       res.append("\n Total files:\t" + totalFiles);
       if (missingSize > 0) {

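The bestNode signature reformatted above picks a random replica while
skipping nodes already known to be dead, and fails once no live node can
hold the block. A standalone sketch of that selection loop (plain String
node IDs stand in for DatanodeInfo):

    import java.io.IOException;
    import java.util.Collections;
    import java.util.HashSet;
    import java.util.Random;
    import java.util.Set;

    public class BestNodeSketch {
      static final Random r = new Random();

      static String bestNode(String[] nodes, Set<String> deadNodes)
          throws IOException {
        if (nodes == null || nodes.length - deadNodes.size() < 1)
          throw new IOException("No live nodes contain current block");
        String chosen = null;
        while (chosen == null) {
          String candidate = nodes[r.nextInt(nodes.length)];
          if (!deadNodes.contains(candidate))
            chosen = candidate;   // keep sampling until a live node is hit
        }
        return chosen;
      }

      public static void main(String[] args) throws IOException {
        String[] nodes = {"dn1", "dn2", "dn3"};
        Set<String> dead = new HashSet<>(Collections.singleton("dn2"));
        System.out.println(bestNode(nodes, dead));
      }
    }
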
+ 7 - 7
src/java/org/apache/hadoop/dfs/NamespaceInfo.java

@@ -41,8 +41,8 @@ class NamespaceInfo extends StorageInfo implements Writable {
     buildVersion = null;
   }
   
-  public NamespaceInfo( int nsID, long cT ) {
-    super( FSConstants.LAYOUT_VERSION, nsID, cT );
+  public NamespaceInfo(int nsID, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cT);
     buildVersion = Storage.getBuildVersion();
   }
   
@@ -60,14 +60,14 @@ class NamespaceInfo extends StorageInfo implements Writable {
   }
 
   public void write(DataOutput out) throws IOException {
-    UTF8.writeString( out, getBuildVersion() );
-    out.writeInt( getLayoutVersion() );
-    out.writeInt( getNamespaceID() );
-    out.writeLong( getCTime() );
+    UTF8.writeString(out, getBuildVersion());
+    out.writeInt(getLayoutVersion());
+    out.writeInt(getNamespaceID());
+    out.writeLong(getCTime());
   }
 
   public void readFields(DataInput in) throws IOException {
-    buildVersion = UTF8.readString( in );
+    buildVersion = UTF8.readString(in);
     layoutVersion = in.readInt();
     namespaceID = in.readInt();
     cTime = in.readLong();
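The write/readFields pair above follows the usual Writable discipline:
fields are written and read back in exactly the same order. A round-trip
sketch with a hypothetical Info class and in-memory streams:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInput;
    import java.io.DataInputStream;
    import java.io.DataOutput;
    import java.io.DataOutputStream;
    import java.io.IOException;

    public class WritablePairSketch {
      static class Info {
        int layoutVersion;
        int namespaceID;
        long cTime;

        void write(DataOutput out) throws IOException {
          out.writeInt(layoutVersion);    // fields go out in a fixed order...
          out.writeInt(namespaceID);
          out.writeLong(cTime);
        }

        void readFields(DataInput in) throws IOException {
          layoutVersion = in.readInt();   // ...and come back in the same order
          namespaceID = in.readInt();
          cTime = in.readLong();
        }
      }

      public static void main(String[] args) throws IOException {
        Info a = new Info();
        a.layoutVersion = -4; a.namespaceID = 42; a.cTime = 0L;
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        a.write(new DataOutputStream(buf));
        Info b = new Info();
        b.readFields(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(b.namespaceID + " " + b.cTime);   // 42 0
      }
    }
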

+ 2 - 2
src/java/org/apache/hadoop/dfs/SafeModeException.java

@@ -10,8 +10,8 @@ import java.io.IOException;
  */
 public class SafeModeException extends IOException {
 
-  public SafeModeException( String text, FSNamesystem.SafeModeInfo mode  ) {
-    super( text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
+  public SafeModeException(String text, FSNamesystem.SafeModeInfo mode) {
+    super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
   }
 
 }

+ 3 - 3
src/java/org/apache/hadoop/dfs/SecondaryNameNode.java

@@ -426,9 +426,9 @@ public class SecondaryNameNode implements FSConstants, Runnable {
    */
   public static class GetImageServlet extends HttpServlet {
     @SuppressWarnings("unchecked")
-      public void doGet(HttpServletRequest request,
-                        HttpServletResponse response
-                        ) throws ServletException, IOException {
+    public void doGet(HttpServletRequest request,
+                      HttpServletResponse response
+                      ) throws ServletException, IOException {
       Map<String,String[]> pmap = request.getParameterMap();
       try {
         ServletContext context = getServletContext();

+ 159 - 159
src/java/org/apache/hadoop/dfs/Storage.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.util.VersionInfo;
 /**
  * Common class for storage information.
  * 
- * TODO namespaceID should be long and computed as hash( address + port )
+ * TODO namespaceID should be long and computed as hash(address + port)
  * @author Konstantin Shvachko
  */
 class StorageInfo {
@@ -47,16 +47,16 @@ class StorageInfo {
   long  cTime;          // creation timestamp
   
   StorageInfo () {
-    this( 0, 0, 0L );
+    this(0, 0, 0L);
   }
   
-  StorageInfo( int layoutV, int nsID, long cT ) {
+  StorageInfo(int layoutV, int nsID, long cT) {
     layoutVersion = layoutV;
     namespaceID = nsID;
     cTime = cT;
   }
   
-  StorageInfo( StorageInfo from ) {
+  StorageInfo(StorageInfo from) {
     layoutVersion = from.layoutVersion;
     namespaceID = from.namespaceID;
     cTime = from.cTime;
@@ -124,7 +124,7 @@ abstract class Storage extends StorageInfo {
     File              root; // root directory
     FileLock          lock; // storage lock
     
-    StorageDirectory( File dir ) {
+    StorageDirectory(File dir) {
       this.root = dir;
       this.lock = null;
     }
@@ -135,17 +135,17 @@ abstract class Storage extends StorageInfo {
      * @throws IOException if file cannot be read or contains inconsistent data
      */
     void read() throws IOException {
-      read( getVersionFile() );
+      read(getVersionFile());
     }
     
-    void read( File from ) throws IOException {
-      RandomAccessFile file = new RandomAccessFile( from, "rws" );
+    void read(File from) throws IOException {
+      RandomAccessFile file = new RandomAccessFile(from, "rws");
       try {
-        FileInputStream in = new FileInputStream( file.getFD() );
+        FileInputStream in = new FileInputStream(file.getFD());
         file.seek(0);
         Properties props = new Properties();
-        props.load( in );
-        getFields( props, this );
+        props.load(in);
+        getFields(props, this);
       } finally {
         file.close();
       }
@@ -157,17 +157,17 @@ abstract class Storage extends StorageInfo {
      * @throws IOException
      */
     void write() throws IOException {
-      write( getVersionFile() );
+      write(getVersionFile());
     }
 
-    void write( File to ) throws IOException {
+    void write(File to) throws IOException {
       Properties props = new Properties();
-      setFields( props, this );
-      RandomAccessFile file = new RandomAccessFile( to, "rws" );
+      setFields(props, this);
+      RandomAccessFile file = new RandomAccessFile(to, "rws");
       try {
         file.seek(0);
-        FileOutputStream out = new FileOutputStream( file.getFD() );
-        props.store( out, null );
+        FileOutputStream out = new FileOutputStream(file.getFD());
+        props.store(out, null);
       } finally {
         file.close();
       }
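
The read/write pair above persists the VERSION file as java.util.Properties
through a RandomAccessFile opened in "rws" mode. A standalone round-trip
sketch of that mechanism (the file name and property key are made up):

    import java.io.File;
    import java.io.FileInputStream;
    import java.io.FileOutputStream;
    import java.io.IOException;
    import java.io.RandomAccessFile;
    import java.util.Properties;

    public class VersionFileSketch {
      static void write(File to, Properties props) throws IOException {
        RandomAccessFile file = new RandomAccessFile(to, "rws");
        try {
          file.seek(0);
          FileOutputStream out = new FileOutputStream(file.getFD());
          props.store(out, null);               // persist key=value pairs
        } finally {
          file.close();
        }
      }

      static Properties read(File from) throws IOException {
        RandomAccessFile file = new RandomAccessFile(from, "rws");
        Properties props = new Properties();
        try {
          file.seek(0);
          FileInputStream in = new FileInputStream(file.getFD());
          props.load(in);
        } finally {
          file.close();
        }
        return props;
      }

      public static void main(String[] args) throws IOException {
        File f = new File("VERSION.example");   // made-up file name
        Properties p = new Properties();
        p.setProperty("layoutVersion", "-4");
        write(f, p);
        System.out.println(read(f).getProperty("layoutVersion"));
      }
    }
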
@@ -188,33 +188,33 @@ abstract class Storage extends StorageInfo {
      */
     void clearDirectory() throws IOException {
       File curDir = this.getCurrentDir();
-      if( curDir.exists() )
-        if( ! (FileUtil.fullyDelete( curDir )) )
-          throw new IOException("Cannot remove current directory: " + curDir );
-      if( ! curDir.mkdirs() )
-        throw new IOException( "Cannot create directory " + curDir );
+      if (curDir.exists())
+        if (!(FileUtil.fullyDelete(curDir)))
+          throw new IOException("Cannot remove current directory: " + curDir);
+      if (!curDir.mkdirs())
+        throw new IOException("Cannot create directory " + curDir);
     }
 
     File getCurrentDir() {
-      return new File( root, STORAGE_DIR_CURRENT );
+      return new File(root, STORAGE_DIR_CURRENT);
     }
     File getVersionFile() {
-      return new File( new File( root, STORAGE_DIR_CURRENT ), STORAGE_FILE_VERSION );
+      return new File(new File(root, STORAGE_DIR_CURRENT), STORAGE_FILE_VERSION);
     }
     File getPreviousVersionFile() {
-      return new File( new File( root, STORAGE_DIR_PREVIOUS ), STORAGE_FILE_VERSION );
+      return new File(new File(root, STORAGE_DIR_PREVIOUS), STORAGE_FILE_VERSION);
     }
     File getPreviousDir() {
-      return new File( root, STORAGE_DIR_PREVIOUS );
+      return new File(root, STORAGE_DIR_PREVIOUS);
     }
     File getPreviousTmp() {
-      return new File( root, STORAGE_TMP_PREVIOUS );
+      return new File(root, STORAGE_TMP_PREVIOUS);
     }
     File getRemovedTmp() {
-      return new File( root, STORAGE_TMP_REMOVED );
+      return new File(root, STORAGE_TMP_REMOVED);
     }
     File getFinalizedTmp() {
-      return new File( root, STORAGE_TMP_FINALIZED );
+      return new File(root, STORAGE_TMP_FINALIZED);
     }
 
     /**
@@ -226,40 +226,40 @@ abstract class Storage extends StorageInfo {
     * @throws InconsistentFSStateException if directory state is not 
      * consistent and cannot be recovered 
      */
-    StorageState analyzeStorage( StartupOption startOpt ) throws IOException {
+    StorageState analyzeStorage(StartupOption startOpt) throws IOException {
       assert root != null : "root is null";
       String rootPath = root.getCanonicalPath();
       try { // check that storage exists
-        if( ! root.exists() ) {
+        if (!root.exists()) {
           // storage directory does not exist
-          if( startOpt != StartupOption.FORMAT ) {
-            LOG.info( "Storage directory " + rootPath + " does not exist." );
+          if (startOpt != StartupOption.FORMAT) {
+            LOG.info("Storage directory " + rootPath + " does not exist.");
             return StorageState.NON_EXISTENT;
           }
-          LOG.info( rootPath + " does not exist. Creating ..." );
-          if( ! root.mkdirs() )
-            throw new IOException( "Cannot create directory " + rootPath );
+          LOG.info(rootPath + " does not exist. Creating ...");
+          if (!root.mkdirs())
+            throw new IOException("Cannot create directory " + rootPath);
         }
         // or is inaccessible
-        if( ! root.isDirectory() ) {
-          LOG.info( rootPath + "is not a directory." );
+        if (!root.isDirectory()) {
+          LOG.info(rootPath + "is not a directory.");
           return StorageState.NON_EXISTENT;
         }
-        if( ! root.canWrite() ) {
-          LOG.info( "Cannot access storage directory " + rootPath );
+        if (!root.canWrite()) {
+          LOG.info("Cannot access storage directory " + rootPath);
           return StorageState.NON_EXISTENT;
         }
-      } catch( SecurityException ex ) {
-        LOG.info( "Cannot access storage directory " + rootPath, ex );
+      } catch(SecurityException ex) {
+        LOG.info("Cannot access storage directory " + rootPath, ex);
         return StorageState.NON_EXISTENT;
       }
 
       this.lock(); // lock storage if it exists
 
-      if( startOpt == StartupOption.FORMAT )
+      if (startOpt == StartupOption.FORMAT)
         return StorageState.NOT_FORMATTED;
       // check whether a conversion is required
-      if( isConversionNeeded( this ) )
+      if (isConversionNeeded(this))
         return StorageState.CONVERT;
       // check whether current directory is valid
       File versionFile = getVersionFile();
@@ -271,48 +271,48 @@ abstract class Storage extends StorageInfo {
       boolean hasRemovedTmp = getRemovedTmp().exists();
       boolean hasFinalizedTmp = getFinalizedTmp().exists();
 
-      if( !(hasPreviousTmp || hasRemovedTmp || hasFinalizedTmp) ) {
+      if (!(hasPreviousTmp || hasRemovedTmp || hasFinalizedTmp)) {
         // no temp dirs - no recovery
-        if( hasCurrent )
+        if (hasCurrent)
           return StorageState.NORMAL;
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-                      "version file in current directory it is missing." );
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 "version file in current directory is missing.");
         return StorageState.NOT_FORMATTED;
       }
 
-      if( (hasPreviousTmp?1:0)+(hasRemovedTmp?1:0)+(hasFinalizedTmp?1:0) > 1 )
+      if ((hasPreviousTmp?1:0)+(hasRemovedTmp?1:0)+(hasFinalizedTmp?1:0) > 1)
         // more than one temp dirs
-        throw new InconsistentFSStateException( root,
-                    "too many temporary directories." );
+        throw new InconsistentFSStateException(root,
+                                               "too many temporary directories.");
 
       // # of temp dirs == 1 should either recover or complete a transition
-      if( hasFinalizedTmp ) {
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-              STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
-              + "cannot exist together." );
+      if (hasFinalizedTmp) {
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_FINALIZED
+                                                 + " cannot exist together.");
         return StorageState.COMPLETE_FINALIZE;
       }
 
-      if( hasPreviousTmp ) {
-        if( hasPrevious )
-          throw new InconsistentFSStateException( root,
-              STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
-              + " cannot exist together." );
-        if( hasCurrent )
+      if (hasPreviousTmp) {
+        if (hasPrevious)
+          throw new InconsistentFSStateException(root,
+                                                 STORAGE_DIR_PREVIOUS + " and " + STORAGE_TMP_PREVIOUS
+                                                 + " cannot exist together.");
+        if (hasCurrent)
           return StorageState.COMPLETE_UPGRADE;
         return StorageState.RECOVER_UPGRADE;
       }
       
       assert hasRemovedTmp : "hasRemovedTmp must be true";
-      if( !(hasCurrent ^ hasPrevious) )
-        throw new InconsistentFSStateException( root,
-            "one and only one directory " + STORAGE_DIR_CURRENT 
-            + " or " + STORAGE_DIR_PREVIOUS 
-            + " must be present when " + STORAGE_TMP_REMOVED
-            + " exists." );
-      if( hasCurrent )
+      if (!(hasCurrent ^ hasPrevious))
+        throw new InconsistentFSStateException(root,
+                                               "one and only one directory " + STORAGE_DIR_CURRENT 
+                                               + " or " + STORAGE_DIR_PREVIOUS 
+                                               + " must be present when " + STORAGE_TMP_REMOVED
+                                               + " exists.");
+      if (hasCurrent)
         return StorageState.COMPLETE_ROLLBACK;
       return StorageState.RECOVER_ROLLBACK;
     }
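
Condensed, the analyzeStorage checks above form a small decision table over five booleans. A sketch of the same mapping, returning state names as strings purely for illustration ("inconsistent" stands for the thrown InconsistentFSStateException paths):

// Sketch of analyzeStorage's decision logic above: directory flags -> outcome.
public class StorageStateDemo {
  // hasCurrent/hasPrevious: the current/previous version files exist;
  // prevTmp/removedTmp/finalizedTmp: the corresponding *.tmp dirs exist.
  static String state(boolean hasCurrent, boolean hasPrevious,
                      boolean prevTmp, boolean removedTmp, boolean finalizedTmp) {
    int tmps = (prevTmp?1:0) + (removedTmp?1:0) + (finalizedTmp?1:0);
    if (tmps == 0)
      return hasCurrent ? "NORMAL"
           : hasPrevious ? "inconsistent" : "NOT_FORMATTED";
    if (tmps > 1) return "inconsistent";      // too many temporary directories
    if (finalizedTmp)
      return hasPrevious ? "inconsistent" : "COMPLETE_FINALIZE";
    if (prevTmp)
      return hasPrevious ? "inconsistent"
           : hasCurrent ? "COMPLETE_UPGRADE" : "RECOVER_UPGRADE";
    // removedTmp: exactly one of current/previous must be present
    if (hasCurrent == hasPrevious) return "inconsistent";
    return hasCurrent ? "COMPLETE_ROLLBACK" : "RECOVER_ROLLBACK";
  }

  public static void main(String[] args) {
    System.out.println(state(true, false, false, false, false)); // NORMAL
    System.out.println(state(true, false, true,  false, false)); // COMPLETE_UPGRADE
  }
}

The invariant is that at most one *.tmp marker may exist at a time, and each marker pins down which of current/previous must be present.
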
@@ -323,39 +323,39 @@ abstract class Storage extends StorageInfo {
      * @param curState specifies what/how the state should be recovered
      * @throws IOException
      */
-    void doRecover( StorageState curState ) throws IOException {
+    void doRecover(StorageState curState) throws IOException {
       File curDir = getCurrentDir();
       String rootPath = root.getCanonicalPath();
-      switch( curState ) {
-        case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
-          LOG.info( "Completing previous upgrade for storage directory " 
-                    + rootPath + "." );
-          rename( getPreviousTmp(), getPreviousDir() );
-          return;
-        case RECOVER_UPGRADE:   // mv previous.tmp -> current
-          LOG.info( "Recovering storage directory " + rootPath
-                    + " from previous upgrade." );
-          if( curDir.exists() )
-            deleteDir( curDir );
-          rename( getPreviousTmp(), curDir );
-          return;
-        case COMPLETE_ROLLBACK: // rm removed.tmp
-          LOG.info( "Completing previous rollback for storage directory "
-                    + rootPath + "." );
-          deleteDir( getRemovedTmp() );
-          return;
-        case RECOVER_ROLLBACK:  // mv removed.tmp -> current
-          LOG.info( "Recovering storage directory " + rootPath
-                    + " from previous rollback." );
-          rename( getRemovedTmp(), curDir );
-          return;
-        case COMPLETE_FINALIZE: // rm finalized.tmp
-          LOG.info( "Completing previous finalize for storage directory "
-                    + rootPath + "." );
-          deleteDir( getFinalizedTmp() );
-          return;
-        default:
-          throw new IOException( "Unexpected FS state: " + curState );
+      switch(curState) {
+      case COMPLETE_UPGRADE:  // mv previous.tmp -> previous
+        LOG.info("Completing previous upgrade for storage directory " 
+                 + rootPath + ".");
+        rename(getPreviousTmp(), getPreviousDir());
+        return;
+      case RECOVER_UPGRADE:   // mv previous.tmp -> current
+        LOG.info("Recovering storage directory " + rootPath
+                 + " from previous upgrade.");
+        if (curDir.exists())
+          deleteDir(curDir);
+        rename(getPreviousTmp(), curDir);
+        return;
+      case COMPLETE_ROLLBACK: // rm removed.tmp
+        LOG.info("Completing previous rollback for storage directory "
+                 + rootPath + ".");
+        deleteDir(getRemovedTmp());
+        return;
+      case RECOVER_ROLLBACK:  // mv removed.tmp -> current
+        LOG.info("Recovering storage directory " + rootPath
+                 + " from previous rollback.");
+        rename(getRemovedTmp(), curDir);
+        return;
+      case COMPLETE_FINALIZE: // rm finalized.tmp
+        LOG.info("Completing previous finalize for storage directory "
+                 + rootPath + ".");
+        deleteDir(getFinalizedTmp());
+        return;
+      default:
+        throw new IOException("Unexpected FS state: " + curState);
       }
     }
 
@@ -365,22 +365,22 @@ abstract class Storage extends StorageInfo {
      * @throws IOException if locking fails
      */
     void lock() throws IOException {
-      File lockF = new File( root, STORAGE_FILE_LOCK );
+      File lockF = new File(root, STORAGE_FILE_LOCK);
       lockF.deleteOnExit();
-      RandomAccessFile file = new RandomAccessFile( lockF, "rws" );
+      RandomAccessFile file = new RandomAccessFile(lockF, "rws");
       try {
         this.lock = file.getChannel().tryLock();
-      } catch( IOException e ) {
-        LOG.info( StringUtils.stringifyException(e) );
+      } catch(IOException e) {
+        LOG.info(StringUtils.stringifyException(e));
         file.close();
         throw e;
       }
-      if( lock == null ) {
+      if (lock == null) {
         String msg = "Cannot lock storage " + this.root 
-                      + ". The directory is already locked.";
-        LOG.info( msg );
+          + ". The directory is already locked.";
+        LOG.info(msg);
         file.close();
-        throw new IOException( msg );
+        throw new IOException(msg);
       }
     }
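
The locking above relies on java.nio's non-blocking FileChannel.tryLock(), which returns null when another process already holds the lock, so two daemons cannot share one storage directory. A standalone sketch of the same pattern; the lock-file name "in_use.lock" is an assumption here (the real name comes from the STORAGE_FILE_LOCK constant, which is not shown in this hunk):

import java.io.File;
import java.io.IOException;
import java.io.RandomAccessFile;
import java.nio.channels.FileLock;

// Sketch of the non-blocking directory-lock pattern used by lock() above.
public class DirLockDemo {
  public static void main(String[] args) throws IOException {
    File root = new File(args.length > 0 ? args[0] : ".");
    File lockF = new File(root, "in_use.lock");   // assumed name, see above
    lockF.deleteOnExit();                         // remove on normal JVM exit
    RandomAccessFile file = new RandomAccessFile(lockF, "rws");
    FileLock lock = file.getChannel().tryLock();  // null => held elsewhere
    if (lock == null) {
      file.close();
      throw new IOException("Cannot lock storage " + root
                            + ". The directory is already locked.");
    }
    System.out.println("Locked " + lockF);
    lock.release();                               // unlock() equivalent
    file.close();
  }
}
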
 
@@ -390,7 +390,7 @@ abstract class Storage extends StorageInfo {
      * @throws IOException
      */
     void unlock() throws IOException {
-      if( this.lock == null )
+      if (this.lock == null)
         return;
       this.lock.release();
       lock.channel().close();
@@ -400,18 +400,18 @@ abstract class Storage extends StorageInfo {
   /**
    * Create empty storage info of the specified type
    */
-  Storage( NodeType type ) {
+  Storage(NodeType type) {
     super();
     this.storageType = type;
   }
   
-  Storage( NodeType type, int nsID, long cT ) {
-    super( FSConstants.LAYOUT_VERSION, nsID, cT );
+  Storage(NodeType type, int nsID, long cT) {
+    super(FSConstants.LAYOUT_VERSION, nsID, cT);
     this.storageType = type;
   }
   
-  Storage( NodeType type, StorageInfo storageInfo ) {
-    super( storageInfo );
+  Storage(NodeType type, StorageInfo storageInfo) {
+    super(storageInfo);
     this.storageType = type;
   }
   
@@ -419,15 +419,15 @@ abstract class Storage extends StorageInfo {
     return storageDirs.size();
   }
   
-  StorageDirectory getStorageDir( int idx ) {
-    return storageDirs.get( idx );
+  StorageDirectory getStorageDir(int idx) {
+    return storageDirs.get(idx);
   }
   
-  protected void addStorageDir( StorageDirectory sd ) {
-    storageDirs.add( sd );
+  protected void addStorageDir(StorageDirectory sd) {
+    storageDirs.add(sd);
   }
   
-  abstract boolean isConversionNeeded( StorageDirectory sd ) throws IOException;
+  abstract boolean isConversionNeeded(StorageDirectory sd) throws IOException;
   
   /**
    * Get common storage fields.
@@ -436,28 +436,28 @@ abstract class Storage extends StorageInfo {
    * @param props
    * @throws IOException
    */
-  protected void getFields( Properties props, 
-                            StorageDirectory sd 
-                          ) throws IOException {
+  protected void getFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
     String sv, st, sid, sct;
-    sv = props.getProperty( "layoutVersion" );
-    st = props.getProperty( "storageType" );
-    sid = props.getProperty( "namespaceID" );
-    sct = props.getProperty( "cTime" );
-    if( sv == null || st == null || sid == null || sct == null )
-      throw new InconsistentFSStateException( sd.root,
-                    "file " + STORAGE_FILE_VERSION + " is invalid." );
-    int rv = Integer.parseInt( sv );
-    NodeType rt = NodeType.valueOf( st );
-    int rid = Integer.parseInt( sid );
-    long rct = Long.parseLong( sct );
-    if( ! storageType.equals( rt ) ||
-        ! (( namespaceID == 0 ) || ( rid == 0 ) || namespaceID == rid ))
-      throw new InconsistentFSStateException( sd.root,
-                  "is incompatible with others." );
-    if( rv < FSConstants.LAYOUT_VERSION ) // future version
-        throw new IncorrectVersionException(rv, "storage directory " 
-                                            + sd.root.getCanonicalPath() );
+    sv = props.getProperty("layoutVersion");
+    st = props.getProperty("storageType");
+    sid = props.getProperty("namespaceID");
+    sct = props.getProperty("cTime");
+    if (sv == null || st == null || sid == null || sct == null)
+      throw new InconsistentFSStateException(sd.root,
+                                             "file " + STORAGE_FILE_VERSION + " is invalid.");
+    int rv = Integer.parseInt(sv);
+    NodeType rt = NodeType.valueOf(st);
+    int rid = Integer.parseInt(sid);
+    long rct = Long.parseLong(sct);
+    if (!storageType.equals(rt) ||
+        !((namespaceID == 0) || (rid == 0) || namespaceID == rid))
+      throw new InconsistentFSStateException(sd.root,
+                                             "is incompatible with others.");
+    if (rv < FSConstants.LAYOUT_VERSION) // future version
+      throw new IncorrectVersionException(rv, "storage directory " 
+                                          + sd.root.getCanonicalPath());
     layoutVersion = rv;
     storageType = rt;
     namespaceID = rid;
@@ -471,24 +471,24 @@ abstract class Storage extends StorageInfo {
    * @param props
    * @throws IOException
    */
-  protected void setFields( Properties props, 
-                            StorageDirectory sd 
-                          ) throws IOException {
-    props.setProperty( "layoutVersion", String.valueOf( layoutVersion ));
-    props.setProperty( "storageType", storageType.toString() );
-    props.setProperty( "namespaceID", String.valueOf( namespaceID ));
-    props.setProperty( "cTime", String.valueOf( cTime ));
+  protected void setFields(Properties props, 
+                           StorageDirectory sd 
+                           ) throws IOException {
+    props.setProperty("layoutVersion", String.valueOf(layoutVersion));
+    props.setProperty("storageType", storageType.toString());
+    props.setProperty("namespaceID", String.valueOf(namespaceID));
+    props.setProperty("cTime", String.valueOf(cTime));
   }
 
-  static void rename( File from, File to ) throws IOException {
-    if( ! from.renameTo( to ))
-      throw new IOException( "Failed to rename " 
-          + from.getCanonicalPath() + " to " + to.getCanonicalPath() );
+  static void rename(File from, File to) throws IOException {
+    if (!from.renameTo(to))
+      throw new IOException("Failed to rename " 
+                            + from.getCanonicalPath() + " to " + to.getCanonicalPath());
   }
 
-  static void deleteDir( File dir ) throws IOException {
-    if( ! FileUtil.fullyDelete( dir ) )
-      throw new IOException( "Failed to delete " + dir.getCanonicalPath() );
+  static void deleteDir(File dir) throws IOException {
+    if (!FileUtil.fullyDelete(dir))
+      throw new IOException("Failed to delete " + dir.getCanonicalPath());
   }
   
   /**
@@ -516,9 +516,9 @@ abstract class Storage extends StorageInfo {
     return VersionInfo.getRevision();
   }
 
-  static String getRegistrationID( StorageInfo storage ) {
-    return "NS-" + Integer.toString( storage.getNamespaceID() )
-           + "-" + Integer.toString( storage.getLayoutVersion() )
-           + "-" + Long.toString( storage.getCTime() );
+  static String getRegistrationID(StorageInfo storage) {
+    return "NS-" + Integer.toString(storage.getNamespaceID())
+      + "-" + Integer.toString(storage.getLayoutVersion())
+      + "-" + Long.toString(storage.getCTime());
   }
 }

+ 4 - 4
src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java

@@ -11,12 +11,12 @@ import java.io.IOException;
  */
 class UnregisteredDatanodeException extends IOException {
 
-  public UnregisteredDatanodeException( DatanodeID nodeID ) {
-    super("Unregistered data node: " + nodeID.getName() );
+  public UnregisteredDatanodeException(DatanodeID nodeID) {
+    super("Unregistered data node: " + nodeID.getName());
   }
 
-  public UnregisteredDatanodeException( DatanodeID nodeID, 
-                                        DatanodeInfo storedNode ) {
+  public UnregisteredDatanodeException(DatanodeID nodeID, 
+                                       DatanodeInfo storedNode) {
     super("Data node " + nodeID.getName() 
           + " is attempting to report storage ID "
           + nodeID.getStorageID() + ". Node " 

+ 4 - 4
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -288,9 +288,9 @@ public class DistributedCache {
     byte[] digest = null;
 
     FileSystem fileSystem = getFileSystem(cache, conf);
-    if(!(fileSystem instanceof ChecksumFileSystem)) {
-      throw new IOException( "Not a checksummed file system: "
-                             +fileSystem.getUri() );
+    if (!(fileSystem instanceof ChecksumFileSystem)) {
+      throw new IOException("Not a checksummed file system: "
+                            +fileSystem.getUri());
     }
     String filename = cache.getPath();
     Path filePath = new Path(filename);
@@ -304,7 +304,7 @@ public class DistributedCache {
     }
     if (!fileSystem.exists(md5File)) {
       ChecksumFileSystem checksumFs;
-      if(!(fileSystem instanceof ChecksumFileSystem)) {
+      if (!(fileSystem instanceof ChecksumFileSystem)) {
         throw new IOException(
                               "Not a checksumed file system: "+fileSystem.getUri());
       } else {

+ 22 - 22
src/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -112,7 +112,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
       this.file = file;
       Path sumFile = fs.getChecksumFile(file);
       try {
-        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(),bufferSize);
+        int sumBufferSize = fs.getSumBufferSize(fs.getBytesPerSum(), bufferSize);
         sums = fs.getRawFileSystem().open(sumFile, sumBufferSize);
 
         byte[] version = new byte[CHECKSUM_VERSION.length];
@@ -133,14 +133,14 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     public void seek(long desired) throws IOException {
       // seek to a checksum boundary
       long checksumBoundary = desired/bytesPerSum*bytesPerSum;
-      if(checksumBoundary != getPos()) {
+      if (checksumBoundary != getPos()) {
         datas.seek(checksumBoundary);
-        if(sums != null) {
+        if (sums != null) {
           sums.seek(HEADER_LENGTH + 4*(checksumBoundary/bytesPerSum));
         }
       }
       
-      if(sums != null) {
+      if (sums != null) {
         sum.reset();
         inSum = 0;
       }
@@ -207,9 +207,9 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
               summed += toSum;
               
               inSum += toSum;
-              if (inSum == bytesPerSum ) {
+              if (inSum == bytesPerSum) {
                 verifySum(read-(summed-bytesPerSum));
-              } else if( read == summed && endOfFile ) {
+              } else if (read == summed && endOfFile) {
                 verifySum(read-read/bytesPerSum*bytesPerSum);
               }
             }
@@ -314,7 +314,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     }
 
     @Override
-      public boolean seekToNewSource(long targetPos) throws IOException {
+    public boolean seekToNewSource(long targetPos) throws IOException {
       return datas.seekToNewSource(targetPos) ||
         sums.seekToNewSource(targetPos/bytesPerSum);
     }
@@ -327,7 +327,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @param bufferSize the size of the buffer to be used.
    */
   @Override
-    public FSDataInputStream open(Path f, int bufferSize) throws IOException {
+  public FSDataInputStream open(Path f, int bufferSize) throws IOException {
     if (!exists(f)) {
       throw new FileNotFoundException(f.toString());
     }
@@ -405,7 +405,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     
     public void close() throws IOException {
       writeSum();
-      if(sums != null) {
+      if (sums != null) {
         sums.close();
       }
       out.close();
@@ -429,8 +429,8 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @param replication required block replication for the file. 
    */
   @Override
-    public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
-                                     short replication, long blockSize, Progressable progress)
+  public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
+                                   short replication, long blockSize, Progressable progress)
     throws IOException {
     if (exists(f) && !overwrite) {
       throw new IOException("File already exists:" + f);
@@ -497,7 +497,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
       return fs.delete(f);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.delete(checkFile);
       }
 
@@ -518,7 +518,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * @exception IOException
    */
   @Override
-    public Path[] listPaths(Path[] files) throws IOException {
+  public Path[] listPaths(Path[] files) throws IOException {
     return fs.listPaths(files, DEFAULT_FILTER);
   }
 
@@ -533,17 +533,17 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public boolean mkdirs(Path f) throws IOException {
+  public boolean mkdirs(Path f) throws IOException {
     return fs.mkdirs(f);
   }
 
   @Override
-    public void lock(Path f, boolean shared) throws IOException {
+  public void lock(Path f, boolean shared) throws IOException {
     if (fs.isDirectory(f)) {
       fs.lock(f, shared);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.lock(checkFile, shared);
       }
       fs.lock(f, shared);
@@ -551,12 +551,12 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public void release(Path f) throws IOException {
+  public void release(Path f) throws IOException {
     if (fs.isDirectory(f)) {
       fs.release(f);
     } else {
       Path checkFile = getChecksumFile(f);
-      if(fs.exists(checkFile)) {
+      if (fs.exists(checkFile)) {
         fs.release(getChecksumFile(f));
       }
       fs.release(f);
@@ -564,7 +564,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileSystem localFs = getNamed("file:///", getConf());
     FileUtil.copy(localFs, src, this, dst, delSrc, getConf());
@@ -575,7 +575,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * Copy it from FS control to the local dst name.
    */
   @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileSystem localFs = getNamed("file:///", getConf());
     FileUtil.copy(this, src, localFs, dst, delSrc, getConf());
@@ -615,13 +615,13 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
   }
 
   @Override
-    public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     return tmpLocalFile;
   }
 
   @Override
-    public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
+  public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
     throws IOException {
     moveFromLocalFile(tmpLocalFile, fsOutputFile);
   }
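
Two pieces of integer arithmetic carry the seek() and constructor hunks above: reads must restart on a checksum-chunk boundary, and the sums file stores one 4-byte CRC per bytesPerSum data bytes after a fixed header. A sketch of that math; HEADER_LENGTH = 8 is an assumption here (the real constant is defined elsewhere in the class):

// Sketch of the offset arithmetic behind ChecksumFileSystem.seek() above.
public class ChecksumOffsetDemo {
  static final int HEADER_LENGTH = 8;  // assumed; real value not in this hunk
  static final int CHECKSUM_SIZE = 4;  // one CRC32 per chunk, 4 bytes

  // Largest chunk boundary <= desired; integer division truncates.
  static long dataBoundary(long desired, int bytesPerSum) {
    return desired / bytesPerSum * bytesPerSum;
  }

  // Byte offset of that chunk's checksum within the sums file.
  static long sumOffset(long boundary, int bytesPerSum) {
    return HEADER_LENGTH + CHECKSUM_SIZE * (boundary / bytesPerSum);
  }

  public static void main(String[] args) {
    int bytesPerSum = 512;
    System.out.println(dataBoundary(1300, bytesPerSum)); // 1024
    System.out.println(sumOffset(1024, bytesPerSum));    // 8 + 4*2 = 16
  }
}
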

+ 7 - 7
src/java/org/apache/hadoop/fs/DF.java

@@ -42,19 +42,19 @@ public class DF {
   private int percentUsed;
   private String mount;
   
-  public DF(File path, Configuration conf ) throws IOException {
-    this( path, conf.getLong( "dfs.df.interval", DF.DF_INTERVAL_DEFAULT ));
+  public DF(File path, Configuration conf) throws IOException {
+    this(path, conf.getLong("dfs.df.interval", DF.DF_INTERVAL_DEFAULT));
   }
 
   public DF(File path, long dfInterval) throws IOException {
     this.dirPath = path.getCanonicalPath();
     this.dfInterval = dfInterval;
-    lastDF = ( dfInterval < 0 ) ? 0 : -dfInterval;
+    lastDF = (dfInterval < 0) ? 0 : -dfInterval;
     this.doDF();
   }
   
   private void doDF() throws IOException { 
-    if( lastDF + dfInterval > System.currentTimeMillis() )
+    if (lastDF + dfInterval > System.currentTimeMillis())
       return;
     Process process;
     process = Runtime.getRuntime().exec(getExecString());
@@ -138,10 +138,10 @@ public class DF {
   }
 
   private String[] getExecString() {
-    return new String[] {"df","-k",dirPath};
+    return new String[] {"df","-k", dirPath};
   }
   
-  private void parseExecResult( BufferedReader lines ) throws IOException {
+  private void parseExecResult(BufferedReader lines) throws IOException {
     lines.readLine();                         // skip headings
   
     StringTokenizer tokens =
@@ -161,7 +161,7 @@ public class DF {
 
   public static void main(String[] args) throws Exception {
     String path = ".";
-    if( args.length > 0 )
+    if (args.length > 0)
       path = args[0];
 
     System.out.println(new DF(new File(path), DF_INTERVAL_DEFAULT).toString());
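
doDF() above is guarded by a simple time-based cache: df is only re-executed once dfInterval milliseconds have elapsed, and lastDF is primed to -dfInterval so the very first call always runs. The same pattern around an arbitrary expensive probe (the probe itself is a placeholder):

// Sketch of the interval-based refresh guard used by DF.doDF() above.
public class RefreshGuardDemo {
  private final long interval;
  private long last;
  private String cached;

  RefreshGuardDemo(long intervalMs) {
    this.interval = intervalMs;
    this.last = (intervalMs < 0) ? 0 : -intervalMs; // first get() always probes
  }

  synchronized String get() {
    if (last + interval <= System.currentTimeMillis()) {
      cached = expensiveProbe();                    // stand-in for exec'ing df
      last = System.currentTimeMillis();
    }
    return cached;
  }

  private String expensiveProbe() {                 // placeholder probe
    return "refreshed at " + System.currentTimeMillis();
  }
}
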

+ 9 - 9
src/java/org/apache/hadoop/fs/FSDataInputStream.java

@@ -38,7 +38,7 @@ public class FSDataInputStream extends DataInputStream
     // calls to it in order to cache the position.
     public int read(byte b[], int off, int len) throws IOException {
       int result;
-      if( (result = in.read(b, off, len)) > 0 )
+      if ((result = in.read(b, off, len)) > 0)
         position += result;
       return result;
     }
@@ -53,12 +53,12 @@ public class FSDataInputStream extends DataInputStream
     }
     
     public int read(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       return ((FSInputStream)in).read(position, buffer, offset, length);
     }
     
     public void readFully(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       ((FSInputStream)in).readFully(position, buffer, offset, length);
     }
   }
@@ -95,12 +95,12 @@ public class FSDataInputStream extends DataInputStream
     }
 
     public int read(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       return ((PositionCache)in).read(position, buffer, offset, length);
     }
     
     public void readFully(long position, byte[] buffer, int offset, int length)
-    throws IOException {
+      throws IOException {
       ((PositionCache)in).readFully(position, buffer, offset, length);
     }
   }
@@ -113,7 +113,7 @@ public class FSDataInputStream extends DataInputStream
   
   public FSDataInputStream(FSInputStream in, int bufferSize)
     throws IOException {
-    super( new Buffer(new PositionCache(in), bufferSize) );
+    super(new Buffer(new PositionCache(in), bufferSize));
     this.inStream = in;
   }
   
@@ -126,17 +126,17 @@ public class FSDataInputStream extends DataInputStream
   }
   
   public int read(long position, byte[] buffer, int offset, int length)
-  throws IOException {
+    throws IOException {
     return ((Buffer)in).read(position, buffer, offset, length);
   }
   
   public void readFully(long position, byte[] buffer, int offset, int length)
-  throws IOException {
+    throws IOException {
     ((Buffer)in).readFully(position, buffer, offset, length);
   }
   
   public void readFully(long position, byte[] buffer)
-  throws IOException {
+    throws IOException {
     ((Buffer)in).readFully(position, buffer, 0, buffer.length);
   }
   

+ 2 - 2
src/java/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -74,12 +74,12 @@ public class FSDataOutputStream extends DataOutputStream {
   }
 
   public FSDataOutputStream(OutputStream out, int bufferSize)
-  throws IOException {
+    throws IOException {
     super(new Buffer(new PositionCache(out), bufferSize));
   }
   
   public FSDataOutputStream(OutputStream out, Configuration conf)
-  throws IOException {
+    throws IOException {
     this(out, conf.getInt("io.file.buffer.size", 4096));
   }
 

+ 716 - 716
src/java/org/apache/hadoop/fs/FileSystem.java

@@ -48,812 +48,812 @@ import org.apache.hadoop.util.*;
  * @author Mike Cafarella
  *****************************************************************/
 public abstract class FileSystem extends Configured {
-    public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.FileSystem");
-
-    // cache indexed by URI scheme and authority
-    private static final Map<String,Map<String,FileSystem>> CACHE
-      = new HashMap<String,Map<String,FileSystem>>();
-    /**
-     * Parse the cmd-line args, starting at i.  Remove consumed args
-     * from array.  We expect param in the form:
-     * '-local | -dfs <namenode:port>'
-     */
-    public static FileSystem parseArgs(String argv[], int i, Configuration conf) throws IOException {
-        /**
-        if (argv.length - i < 1) {
-            throw new IOException("Must indicate filesystem type for DFS");
-        }
-        */
-        int orig = i;
-        FileSystem fs = null;
-        String cmd = argv[i];
-        if ("-dfs".equals(cmd)) {
-            i++;
-            InetSocketAddress addr = DataNode.createSocketAddr(argv[i++]);
-            fs = new DistributedFileSystem(addr, conf);
-        } else if ("-local".equals(cmd)) {
-            i++;
-            fs = FileSystem.getLocal(conf);
-        } else {
-            fs = get(conf);                          // using default
-            LOG.info("No FS indicated, using default:"+fs.getName());
-
-        }
-        System.arraycopy(argv, i, argv, orig, argv.length - i);
-        for (int j = argv.length - i; j < argv.length; j++) {
-            argv[j] = null;
-        }
-        return fs;
-    }
-
-    /** Returns the configured filesystem implementation.*/
-    public static FileSystem get(Configuration conf) throws IOException {
-      return getNamed(conf.get("fs.default.name", "local"), conf);
-    }
-
-    /** Called after a new FileSystem instance is constructed.
-     * @param name a uri whose authority section names the host, port, etc.
-     *   for this FileSystem
-     * @param conf the configuration
-     */
-    public abstract void initialize(URI name, Configuration conf)
-      throws IOException;
+  public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.FileSystem");
+
+  // cache indexed by URI scheme and authority
+  private static final Map<String,Map<String,FileSystem>> CACHE
+    = new HashMap<String,Map<String,FileSystem>>();
+  /**
+   * Parse the cmd-line args, starting at i.  Remove consumed args
+   * from array.  We expect param in the form:
+   * '-local | -dfs <namenode:port>'
+   */
+  public static FileSystem parseArgs(String argv[], int i, Configuration conf) throws IOException {
+    /**
+       if (argv.length - i < 1) {
+       throw new IOException("Must indicate filesystem type for DFS");
+       }
+    */
+    int orig = i;
+    FileSystem fs = null;
+    String cmd = argv[i];
+    if ("-dfs".equals(cmd)) {
+      i++;
+      InetSocketAddress addr = DataNode.createSocketAddr(argv[i++]);
+      fs = new DistributedFileSystem(addr, conf);
+    } else if ("-local".equals(cmd)) {
+      i++;
+      fs = FileSystem.getLocal(conf);
+    } else {
+      fs = get(conf);                          // using default
+      LOG.info("No FS indicated, using default:"+fs.getName());
+
+    }
+    System.arraycopy(argv, i, argv, orig, argv.length - i);
+    for (int j = argv.length - i; j < argv.length; j++) {
+      argv[j] = null;
+    }
+    return fs;
+  }
+
+  /** Returns the configured filesystem implementation.*/
+  public static FileSystem get(Configuration conf) throws IOException {
+    return getNamed(conf.get("fs.default.name", "local"), conf);
+  }
+
+  /** Called after a new FileSystem instance is constructed.
+   * @param name a uri whose authority section names the host, port, etc.
+   *   for this FileSystem
+   * @param conf the configuration
+   */
+  public abstract void initialize(URI name, Configuration conf)
+    throws IOException;
 
-    /** Returns a URI whose scheme and authority identify this FileSystem.*/
-    public abstract URI getUri();
+  /** Returns a URI whose scheme and authority identify this FileSystem.*/
+  public abstract URI getUri();
   
-    /** @deprecated call #getUri() instead.*/
-    public abstract String getName();
-
-    /** @deprecated call #get(URI,Configuration) instead. */
-    public static FileSystem getNamed(String name, Configuration conf)
-      throws IOException {
-
-      // convert old-format name to new-format name
-      if (name.equals("local")) {         // "local" is now "file:///".
-        name = "file:///";
-      } else if (name.indexOf('/')==-1) {   // unqualified is "hdfs://"
-        name = "hdfs://"+name;
-      }
+  /** @deprecated call #getUri() instead.*/
+  public abstract String getName();
 
-      return get(URI.create(name), conf);
-    }
+  /** @deprecated call #get(URI,Configuration) instead. */
+  public static FileSystem getNamed(String name, Configuration conf)
+    throws IOException {
 
-    /**
-     * Get the local file syste
-     * @param conf the configuration to configure the file system with
-     * @return a LocalFileSystem
-     */
-    public static LocalFileSystem getLocal(Configuration conf)
-      throws IOException {
-      return (LocalFileSystem)get(LocalFileSystem.NAME, conf);
+    // convert old-format name to new-format name
+    if (name.equals("local")) {         // "local" is now "file:///".
+      name = "file:///";
+    } else if (name.indexOf('/')==-1) {   // unqualified is "hdfs://"
+      name = "hdfs://"+name;
     }
 
-    /** Returns the FileSystem for this URI's scheme and authority.  The scheme
-     * of the URI determines a configuration property name,
-     * <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
-     * The entire URI is passed to the FileSystem instance's initialize method.
-     */
-    public static synchronized FileSystem get(URI uri, Configuration conf)
-      throws IOException {
+    return get(URI.create(name), conf);
+  }
 
-      String scheme = uri.getScheme();
-      String authority = uri.getAuthority();
+  /**
+   * Get the local file system
+   * @param conf the configuration to configure the file system with
+   * @return a LocalFileSystem
+   */
+  public static LocalFileSystem getLocal(Configuration conf)
+    throws IOException {
+    return (LocalFileSystem)get(LocalFileSystem.NAME, conf);
+  }
+
+  /** Returns the FileSystem for this URI's scheme and authority.  The scheme
+   * of the URI determines a configuration property name,
+   * <tt>fs.<i>scheme</i>.class</tt> whose value names the FileSystem class.
+   * The entire URI is passed to the FileSystem instance's initialize method.
+   */
+  public static synchronized FileSystem get(URI uri, Configuration conf)
+    throws IOException {
 
-      if (scheme == null) {                       // no scheme: use default FS
-        return get(conf);
-      }
+    String scheme = uri.getScheme();
+    String authority = uri.getAuthority();
 
-      Map<String,FileSystem> authorityToFs = CACHE.get(scheme);
-      if (authorityToFs == null) {
-        authorityToFs = new HashMap<String,FileSystem>();
-        CACHE.put(scheme, authorityToFs);
-      }
-
-      FileSystem fs = authorityToFs.get(authority);
-      if (fs == null) {
-        Class fsClass = conf.getClass("fs."+scheme+".impl", null);
-        if (fsClass == null) {
-          throw new IOException("No FileSystem for scheme: " + scheme);
-        }
-        fs = (FileSystem)ReflectionUtils.newInstance(fsClass, conf);
-        fs.initialize(uri, conf);
-        authorityToFs.put(authority, fs);
-      }
+    if (scheme == null) {                       // no scheme: use default FS
+      return get(conf);
+    }
 
-      return fs;
+    Map<String,FileSystem> authorityToFs = CACHE.get(scheme);
+    if (authorityToFs == null) {
+      authorityToFs = new HashMap<String,FileSystem>();
+      CACHE.put(scheme, authorityToFs);
     }
 
-    /**
-     * Close all cached filesystems. Be sure those filesystems are not
-     * used anymore.
-     * 
-     * @throws IOException
-     */
-    public static synchronized void closeAll() throws IOException{
-      for(Map<String, FileSystem>  fss : CACHE.values()){
-        for(FileSystem fs : fss.values()){
-          fs.close();
-        }
+    FileSystem fs = authorityToFs.get(authority);
+    if (fs == null) {
+      Class fsClass = conf.getClass("fs."+scheme+".impl", null);
+      if (fsClass == null) {
+        throw new IOException("No FileSystem for scheme: " + scheme);
+      }
+      fs = (FileSystem)ReflectionUtils.newInstance(fsClass, conf);
+      fs.initialize(uri, conf);
+      authorityToFs.put(authority, fs);
+    }
+
+    return fs;
+  }
+
+  /**
+   * Close all cached filesystems. Be sure those filesystems are not
+   * used anymore.
+   * 
+   * @throws IOException
+   */
+  public static synchronized void closeAll() throws IOException {
+    for (Map<String, FileSystem> fss : CACHE.values()) {
+      for (FileSystem fs : fss.values()) {
+        fs.close();
       }
     }
+  }
 
-    /** Make sure that a path specifies a FileSystem. */
-    public Path makeQualified(Path path) {
-      checkPath(path);
+  /** Make sure that a path specifies a FileSystem. */
+  public Path makeQualified(Path path) {
+    checkPath(path);
 
-      if (!path.isAbsolute())
-        path = new Path(getWorkingDirectory(), path);
+    if (!path.isAbsolute())
+      path = new Path(getWorkingDirectory(), path);
 
-      URI pathUri = path.toUri();
-      URI fsUri = getUri();
+    URI pathUri = path.toUri();
+    URI fsUri = getUri();
       
-      String scheme = pathUri.getScheme();
-      String authority = pathUri.getAuthority();
+    String scheme = pathUri.getScheme();
+    String authority = pathUri.getAuthority();
 
-      if (scheme != null &&
-          (authority != null || fsUri.getAuthority() == null))
-        return path;
+    if (scheme != null &&
+        (authority != null || fsUri.getAuthority() == null))
+      return path;
 
-      if (scheme == null) {
-        scheme = fsUri.getScheme();
-      }
+    if (scheme == null) {
+      scheme = fsUri.getScheme();
+    }
 
+    if (authority == null) {
+      authority = fsUri.getAuthority();
       if (authority == null) {
-        authority = fsUri.getAuthority();
-        if (authority == null) {
-          authority = "";
-        }
+        authority = "";
       }
-
-      return new Path(scheme+":"+"//"+authority + pathUri.getPath());
-    }
-    
-    ///////////////////////////////////////////////////////////////
-    // FileSystem
-    ///////////////////////////////////////////////////////////////
-
-    protected FileSystem() {
-      super(null);
     }
 
-    /** Check that a Path belongs to this FileSystem. */
-    protected void checkPath(Path path) {
-      URI uri = path.toUri();
-      if (uri.getScheme() == null)                // fs is relative 
-        return;
-      String thisAuthority = this.getUri().getAuthority();
-      String thatAuthority = uri.getAuthority();
-      if (!(this.getUri().getScheme().equals(uri.getScheme()) &&
-            (thisAuthority == null && thatAuthority == null)
-            || thisAuthority.equals(thatAuthority)))
-        throw new IllegalArgumentException("Wrong FS: "+path+
-                                           ", expected: "+this.getUri());
-    }
-
-    /**
-     * Return a 2D array of size 1x1 or greater, containing hostnames 
-     * where portions of the given file can be found.  For a nonexistent 
-     * file or regions, null will be returned.
-     *
-     * This call is most helpful with DFS, where it returns 
-     * hostnames of machines that contain the given file.
-     *
-     * The FileSystem will simply return an elt containing 'localhost'.
-     */
-    public abstract String[][] getFileCacheHints(Path f, long start, long len) throws IOException;
-
-    /**
-     * Opens an FSDataInputStream at the indicated Path.
-     * @param f the file name to open
-     * @param bufferSize the size of the buffer to be used.
-     */
-    public abstract FSDataInputStream open(Path f, int bufferSize)
+    return new Path(scheme+":"+"//"+authority + pathUri.getPath());
+  }
+    
+  ///////////////////////////////////////////////////////////////
+  // FileSystem
+  ///////////////////////////////////////////////////////////////
+
+  protected FileSystem() {
+    super(null);
+  }
+
+  /** Check that a Path belongs to this FileSystem. */
+  protected void checkPath(Path path) {
+    URI uri = path.toUri();
+    if (uri.getScheme() == null)                // fs is relative 
+      return;
+    String thisAuthority = this.getUri().getAuthority();
+    String thatAuthority = uri.getAuthority();
+    if (!(this.getUri().getScheme().equals(uri.getScheme()) &&
+          (thisAuthority == null && thatAuthority == null)
+          || thisAuthority.equals(thatAuthority)))
+      throw new IllegalArgumentException("Wrong FS: "+path+
+                                         ", expected: "+this.getUri());
+  }
+
+  /**
+   * Return a 2D array of size 1x1 or greater, containing hostnames 
+   * where portions of the given file can be found.  For a nonexistent 
+   * file or regions, null will be returned.
+   *
+   * This call is most helpful with DFS, where it returns 
+   * hostnames of machines that contain the given file.
+   *
+   * The FileSystem will simply return an element containing 'localhost'.
+   */
+  public abstract String[][] getFileCacheHints(Path f, long start, long len) throws IOException;
+
+  /**
+   * Opens an FSDataInputStream at the indicated Path.
+   * @param f the file name to open
+   * @param bufferSize the size of the buffer to be used.
+   */
+  public abstract FSDataInputStream open(Path f, int bufferSize)
     throws IOException;
     
-    /**
-     * Opens an FSDataInputStream at the indicated Path.
-     * @param f the file to open
-     */
-    public FSDataInputStream open(Path f) throws IOException {
-      return open(f, getConf().getInt("io.file.buffer.size", 4096));
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f) throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    getDefaultReplication(),
-                    getDefaultBlockSize());
-    }
-
-    /**
-     * Create an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    getDefaultReplication(),
-                    getDefaultBlockSize(), progress);
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f, short replication)
-      throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    replication,
-                    getDefaultBlockSize());
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * Files are overwritten by default.
-     */
-    public FSDataOutputStream create(Path f, short replication, Progressable progress)
-      throws IOException {
-      return create(f, true, 
-                    getConf().getInt("io.file.buffer.size", 4096),
-                    replication,
-                    getDefaultBlockSize(), progress);
-    }
+  /**
+   * Opens an FSDataInputStream at the indicated Path.
+   * @param f the file to open
+   */
+  public FSDataInputStream open(Path f) throws IOException {
+    return open(f, getConf().getInt("io.file.buffer.size", 4096));
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f) throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  getDefaultReplication(),
+                  getDefaultBlockSize());
+  }
+
+  /**
+   * Create an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f, Progressable progress) throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  getDefaultReplication(),
+                  getDefaultBlockSize(), progress);
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f, short replication)
+    throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  replication,
+                  getDefaultBlockSize());
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * Files are overwritten by default.
+   */
+  public FSDataOutputStream create(Path f, short replication, Progressable progress)
+    throws IOException {
+    return create(f, true, 
+                  getConf().getInt("io.file.buffer.size", 4096),
+                  replication,
+                  getDefaultBlockSize(), progress);
+  }
 
     
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     */
-    public FSDataOutputStream create( Path f, 
-                                      boolean overwrite,
-                                      int bufferSize
-                                    ) throws IOException {
-      return create( f, overwrite, bufferSize, 
-                     getDefaultReplication(),
-                     getDefaultBlockSize());
-    }
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   */
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize
+                                   ) throws IOException {
+    return create(f, overwrite, bufferSize, 
+                  getDefaultReplication(),
+                  getDefaultBlockSize());
+  }
     
-    /**
-     * Opens an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     */
-    public FSDataOutputStream create( Path f, 
-                                      boolean overwrite,
-                                      int bufferSize,
-                                      Progressable progress
-                                    ) throws IOException {
-      return create( f, overwrite, bufferSize, 
-                     getDefaultReplication(),
-                     getDefaultBlockSize(), progress);
-    }
+  /**
+   * Opens an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   */
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize,
+                                   Progressable progress
+                                   ) throws IOException {
+    return create(f, overwrite, bufferSize, 
+                  getDefaultReplication(),
+                  getDefaultBlockSize(), progress);
+  }
     
     
-    /**
-     * Opens an FSDataOutputStream at the indicated Path.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     * @param replication required block replication for the file. 
-     */
-    public FSDataOutputStream create( Path f, 
-                                      boolean overwrite,
-                                      int bufferSize,
-                                      short replication,
-                                      long blockSize
-                                    ) throws IOException {
-      return create(f, overwrite, bufferSize, replication, blockSize, null);
-    }
-
-    /**
-     * Opens an FSDataOutputStream at the indicated Path with write-progress
-     * reporting.
-     * @param f the file name to open
-     * @param overwrite if a file with this name already exists, then if true,
-     *   the file will be overwritten, and if false an error will be thrown.
-     * @param bufferSize the size of the buffer to be used.
-     * @param replication required block replication for the file. 
-     */
-    public abstract FSDataOutputStream create( Path f, 
-                                               boolean overwrite,
-                                               int bufferSize,
-                                               short replication,
-                                               long blockSize,
-                                               Progressable progress
-                                             ) throws IOException;
-
-    /**
-     * Creates the given Path as a brand-new zero-length file.  If
-     * create fails, or if it already existed, return false.
-     */
-    public boolean createNewFile(Path f) throws IOException {
-      if (exists(f)) {
-        return false;
-      } else {
-        create(f, false, getConf().getInt("io.file.buffer.size", 4096)).close();
-        return true;
-      }
-    }
-
-    /**
-     * Get replication.
-     * 
-     * @param src file name
-     * @return file replication
-     * @throws IOException
-     */
-    public abstract short getReplication(Path src) throws IOException;
-
-    /**
-     * Set replication for an existing file.
-     * 
-     * @param src file name
-     * @param replication new replication
-     * @throws IOException
-     * @return true if successful;
-     *         false if file does not exist or is a directory
-     */
-    public abstract boolean setReplication(Path src, short replication) throws IOException;
-
-    /**
-     * Renames Path src to Path dst.  Can take place on local fs
-     * or remote DFS.
-     */
-    public abstract boolean rename(Path src, Path dst) throws IOException;
+  /**
+   * Opens an FSDataOutputStream at the indicated Path.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file. 
+   */
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize,
+                                   short replication,
+                                   long blockSize
+                                   ) throws IOException {
+    return create(f, overwrite, bufferSize, replication, blockSize, null);
+  }
+
+  /**
+   * Opens an FSDataOutputStream at the indicated Path with write-progress
+   * reporting.
+   * @param f the file name to open
+   * @param overwrite if a file with this name already exists, then if true,
+   *   the file will be overwritten, and if false an error will be thrown.
+   * @param bufferSize the size of the buffer to be used.
+   * @param replication required block replication for the file. 
+   */
+  public abstract FSDataOutputStream create(Path f, 
+                                            boolean overwrite,
+                                            int bufferSize,
+                                            short replication,
+                                            long blockSize,
+                                            Progressable progress
+                                            ) throws IOException;
+
+  /**
+   * Creates the given Path as a brand-new zero-length file.  If
+   * create fails, or if it already existed, return false.
+   */
+  public boolean createNewFile(Path f) throws IOException {
+    if (exists(f)) {
+      return false;
+    } else {
+      create(f, false, getConf().getInt("io.file.buffer.size", 4096)).close();
+      return true;
+    }
+  }
+
+  /**
+   * Get replication.
+   * 
+   * @param src file name
+   * @return file replication
+   * @throws IOException
+   */
+  public abstract short getReplication(Path src) throws IOException;
+
+  /**
+   * Set replication for an existing file.
+   * 
+   * @param src file name
+   * @param replication new replication
+   * @throws IOException
+   * @return true if successful;
+   *         false if file does not exist or is a directory
+   */
+  public abstract boolean setReplication(Path src, short replication) throws IOException;
+
+  /**
+   * Renames Path src to Path dst.  Can take place on local fs
+   * or remote DFS.
+   */
+  public abstract boolean rename(Path src, Path dst) throws IOException;
     
-    /** Delete a file */
-    public abstract boolean delete(Path f) throws IOException;
+  /** Delete a file */
+  public abstract boolean delete(Path f) throws IOException;
     
-    /** Check if exists.
-     * @param f source file
-     */
-    public abstract boolean exists(Path f) throws IOException;
-
-    /** True iff the named path is a directory. */
-    public abstract boolean isDirectory(Path f) throws IOException;
-
-    /** True iff the named path is a regular file. */
-    public boolean isFile(Path f) throws IOException {
-      if (exists(f) && ! isDirectory(f)) {
-        return true;
-      } else {
-        return false;
-      }
-    }
+  /** Check whether the given path exists.
+   * @param f source file
+   */
+  public abstract boolean exists(Path f) throws IOException;
+
+  /** True iff the named path is a directory. */
+  public abstract boolean isDirectory(Path f) throws IOException;
+
+  /** True iff the named path is a regular file. */
+  public boolean isFile(Path f) throws IOException {
+    if (exists(f) && !isDirectory(f)) {
+      return true;
+    } else {
+      return false;
+    }
+  }
     
-    /** The number of bytes in a file. */
-    public abstract long getLength(Path f) throws IOException;
+  /** The number of bytes in a file. */
+  public abstract long getLength(Path f) throws IOException;
     
-    /** Return the number of bytes of the given path 
-     * If <i>f</i> is a file, return the size of the file;
-     * If <i>f</i> is a directory, return the size of the directory tree
-     */
-    public long getContentLength(Path f) throws IOException {
-      if (!isDirectory(f)) {
-        // f is a file
-        return getLength(f);
-      }
+  /** Return the number of bytes of the given path.
+   * If <i>f</i> is a file, return the size of the file;
+   * if <i>f</i> is a directory, return the total size of the directory tree.
+   */
+  public long getContentLength(Path f) throws IOException {
+    if (!isDirectory(f)) {
+      // f is a file
+      return getLength(f);
+    }
       
-      // f is a diretory
-      Path[] contents = listPaths(f);
-      long size = 0;
-      for(int i=0; i<contents.length; i++) {
-        size += getContentLength(contents[i]);
-      }
-      return size;
+    // f is a directory
+    Path[] contents = listPaths(f);
+    long size = 0;
+    for(int i=0; i<contents.length; i++) {
+      size += getContentLength(contents[i]);
     }
+    return size;
+  }
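The method above recurses once per directory entry; an equivalent iterative walk, shown only to make the traversal explicit (a sketch, assuming the same fs handle and a starting Path f):

    java.util.Stack<Path> stack = new java.util.Stack<Path>();
    stack.push(f);
    long size = 0;
    while (!stack.isEmpty()) {
      Path p = stack.pop();
      if (!fs.isDirectory(p)) {
        size += fs.getLength(p);            // plain file: count its bytes
      } else {
        Path[] children = fs.listPaths(p);  // directory: descend
        if (children != null) {
          for (int i = 0; i < children.length; i++) {
            stack.push(children[i]);
          }
        }
      }
    }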
 
-    final private static PathFilter DEFAULT_FILTER = new PathFilter() {
+  final private static PathFilter DEFAULT_FILTER = new PathFilter() {
       public boolean accept(Path file) {
         return true;
       }     
     };
     
-    /** List files in a directory. */
-    public abstract Path[] listPaths(Path f) throws IOException;
+  /** List files in a directory. */
+  public abstract Path[] listPaths(Path f) throws IOException;
     
-    /** 
-     * Filter files in the given pathes using the default checksum filter. 
-     * @param files a list of paths
-     * @return a list of files under the source paths
-     * @exception IOException
-     */
-    public Path[] listPaths(Path[] files ) throws IOException {
-      return listPaths(files, DEFAULT_FILTER);
-    }
-
-    /** Filter files in a directory. */
-    private void listPaths(ArrayList<Path> results, Path f, PathFilter filter)
-      throws IOException {
-      Path listing[] = listPaths(f);
-      if (listing != null) {
-        for (int i = 0; i < listing.length; i++) {
-          if (filter.accept(listing[i])) {
-            results.add(listing[i]);
-          }
+  /** 
+   * Filter files in the given paths using the default filter. 
+   * @param files a list of paths
+   * @return a list of files under the source paths
+   * @exception IOException
+   */
+  public Path[] listPaths(Path[] files) throws IOException {
+    return listPaths(files, DEFAULT_FILTER);
+  }
+
+  /** Filter files in a directory. */
+  private void listPaths(ArrayList<Path> results, Path f, PathFilter filter)
+    throws IOException {
+    Path listing[] = listPaths(f);
+    if (listing != null) {
+      for (int i = 0; i < listing.length; i++) {
+        if (filter.accept(listing[i])) {
+          results.add(listing[i]);
         }
-      }      
-    }
+      }
+    }      
+  }
     
-    /** Filter files in a directory. */
-    public Path[] listPaths(Path f, PathFilter filter) throws IOException {
-      ArrayList<Path> results = new ArrayList<Path>();
-      listPaths(results, f, filter);
-      return (Path[]) results.toArray(new Path[results.size()]);
-    }
+  /** Filter files in a directory. */
+  public Path[] listPaths(Path f, PathFilter filter) throws IOException {
+    ArrayList<Path> results = new ArrayList<Path>();
+    listPaths(results, f, filter);
+    return (Path[]) results.toArray(new Path[results.size()]);
+  }
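A small sketch of the filter hook (the suffix and directory below are illustrative; fs is assumed in scope):

    // keep only ".log" entries of a directory listing
    Path[] logs = fs.listPaths(new Path("/var/app"), new PathFilter() {
        public boolean accept(Path file) {
          return file.getName().endsWith(".log");
        }
      });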
     
-    /** 
-     * Filter files in a list directories using user-supplied path filter. 
-     * @param files a list of paths
-     * @return a list of files under the source paths
-     * @exception IOException
-     */
-    public Path[] listPaths(Path[] files, PathFilter filter)
+  /** 
+   * Filter files in a list of directories using a user-supplied path filter. 
+   * @param files a list of paths
+   * @return a list of files under the source paths
+   * @exception IOException
+   */
+  public Path[] listPaths(Path[] files, PathFilter filter)
     throws IOException {
-      ArrayList<Path> results = new ArrayList<Path>();
-      for(int i=0; i<files.length; i++) {
-        listPaths(results, files[i], filter);
-      }
-      return (Path[]) results.toArray(new Path[results.size()]);
+    ArrayList<Path> results = new ArrayList<Path>();
+    for(int i=0; i<files.length; i++) {
+      listPaths(results, files[i], filter);
     }
+    return (Path[]) results.toArray(new Path[results.size()]);
+  }
     
-    /**
-     * <p>Return all the files that match filePattern and are not checksum
-     * files. Results are sorted by their names.
-     * 
-     * <p>
-     * A filename pattern is composed of <i>regular</i> characters and
-     * <i>special pattern matching</i> characters, which are:
-     *
-     * <dl>
-     *  <dd>
-     *   <dl>
-     *    <p>
-     *    <dt> <tt> ? </tt>
-     *    <dd> Matches any single character.
-     *
-     *    <p>
-     *    <dt> <tt> * </tt>
-     *    <dd> Matches zero or more characters.
-     *
-     *    <p>
-     *    <dt> <tt> [<i>abc</i>] </tt>
-     *    <dd> Matches a single character from character set
-     *     <tt>{<i>a,b,c</i>}</tt>.
-     *
-     *    <p>
-     *    <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
-     *    <dd> Matches a single character from the character range
-     *     <tt>{<i>a...b</i>}</tt>.  Note that character <tt><i>a</i></tt> must be
-     *     lexicographically less than or equal to character <tt><i>b</i></tt>.
-     *
-     *    <p>
-     *    <dt> <tt> [^<i>a</i>] </tt>
-     *    <dd> Matches a single character that is not from character set or range
-     *     <tt>{<i>a</i>}</tt>.  Note that the <tt>^</tt> character must occur
-     *     immediately to the right of the opening bracket.
-     *
-     *    <p>
-     *    <dt> <tt> \<i>c</i> </tt>
-     *    <dd> Removes (escapes) any special meaning of character <i>c</i>.
-     *
-     *   </dl>
-     *  </dd>
-     * </dl>
-     *
-     * @param filePattern a regular expression specifying file pattern
-
-     * @return an array of paths that match the file pattern
-     * @throws IOException
-     */
-    public Path[] globPaths(Path filePattern) throws IOException {
-      return globPaths(filePattern, DEFAULT_FILTER);
-    }
+  /**
+   * <p>Return all the files that match filePattern and are not checksum
+   * files. Results are sorted by their names.
+   * 
+   * <p>
+   * A filename pattern is composed of <i>regular</i> characters and
+   * <i>special pattern matching</i> characters, which are:
+   *
+   * <dl>
+   *  <dd>
+   *   <dl>
+   *    <p>
+   *    <dt> <tt> ? </tt>
+   *    <dd> Matches any single character.
+   *
+   *    <p>
+   *    <dt> <tt> * </tt>
+   *    <dd> Matches zero or more characters.
+   *
+   *    <p>
+   *    <dt> <tt> [<i>abc</i>] </tt>
+   *    <dd> Matches a single character from character set
+   *     <tt>{<i>a,b,c</i>}</tt>.
+   *
+   *    <p>
+   *    <dt> <tt> [<i>a</i>-<i>b</i>] </tt>
+   *    <dd> Matches a single character from the character range
+   *     <tt>{<i>a...b</i>}</tt>.  Note that character <tt><i>a</i></tt> must be
+   *     lexicographically less than or equal to character <tt><i>b</i></tt>.
+   *
+   *    <p>
+   *    <dt> <tt> [^<i>a</i>] </tt>
+   *    <dd> Matches a single character that is not from character set or range
+   *     <tt>{<i>a</i>}</tt>.  Note that the <tt>^</tt> character must occur
+   *     immediately to the right of the opening bracket.
+   *
+   *    <p>
+   *    <dt> <tt> \<i>c</i> </tt>
+   *    <dd> Removes (escapes) any special meaning of character <i>c</i>.
+   *
+   *   </dl>
+   *  </dd>
+   * </dl>
+   *
+   * @param filePattern a glob pattern, as described above, specifying the files to match
+   *
+   * @return an array of paths that match the file pattern
+   * @throws IOException
+   */
+  public Path[] globPaths(Path filePattern) throws IOException {
+    return globPaths(filePattern, DEFAULT_FILTER);
+  }
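To make the grammar above concrete, a few illustrative calls (all paths hypothetical; fs assumed in scope):

    // every part file of a job's output
    Path[] parts = fs.globPaths(new Path("/user/me/output/part-*"));

    // single-character wildcard: part-00000 .. part-00009
    Path[] firstTen = fs.globPaths(new Path("/user/me/output/part-0000?"));

    // character range: logs from 2006 or 2007
    Path[] logs = fs.globPaths(new Path("/logs/200[6-7]/*.log"));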
     
-    /** glob all the file names that matches filePattern
-     * and is accepted by filter.
-     */
-    public Path[] globPaths(Path filePattern, PathFilter filter) 
-        throws IOException {
-      Path [] parents = new Path[1];
-      int level = 0;
-      String filename = filePattern.toUri().getPath();
-      if("".equals(filename) || Path.SEPARATOR.equals(filename)) {
-        parents[0] = filePattern;
-        return parents;
-      }
-      
-      String [] components = filename.split(Path.SEPARATOR);
-      if(filePattern.isAbsolute()) {
-        parents[0] = new Path(Path.SEPARATOR);
-        level = 1;
-      } else {
-        parents[0] = new Path( "" );
-      }
+  /** Glob all the file names that match filePattern
+   * and are accepted by filter.
+   */
+  public Path[] globPaths(Path filePattern, PathFilter filter) 
+    throws IOException {
+    Path [] parents = new Path[1];
+    int level = 0;
+    String filename = filePattern.toUri().getPath();
+    if ("".equals(filename) || Path.SEPARATOR.equals(filename)) {
+      parents[0] = filePattern;
+      return parents;
+    }
       
-      Path[] results = globPathsLevel(parents, components, level, filter);
-      Arrays.sort(results);
-      return results;
+    String [] components = filename.split(Path.SEPARATOR);
+    if (filePattern.isAbsolute()) {
+      parents[0] = new Path(Path.SEPARATOR);
+      level = 1;
+    } else {
+      parents[0] = new Path("");
     }
+      
+    Path[] results = globPathsLevel(parents, components, level, filter);
+    Arrays.sort(results);
+    return results;
+  }
     
-    private Path[] globPathsLevel(Path[] parents,
-        String [] filePattern, int level, PathFilter filter) throws IOException {
-      if (level == filePattern.length)
-        return parents;
-      GlobFilter fp = new GlobFilter(filePattern[level], filter);
-      if( fp.hasPattern()) {
-        parents = listPaths(parents, fp);
-      } else {
-        for(int i=0; i<parents.length; i++) {
-          parents[i] = new Path(parents[i], filePattern[level]);
-        }
+  private Path[] globPathsLevel(Path[] parents,
+                                String [] filePattern, int level, PathFilter filter) throws IOException {
+    if (level == filePattern.length)
+      return parents;
+    GlobFilter fp = new GlobFilter(filePattern[level], filter);
+    if (fp.hasPattern()) {
+      parents = listPaths(parents, fp);
+    } else {
+      for(int i=0; i<parents.length; i++) {
+        parents[i] = new Path(parents[i], filePattern[level]);
       }
-      return globPathsLevel(parents, filePattern, level+1, filter);      
     }
+    return globPathsLevel(parents, filePattern, level+1, filter);      
+  }
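globPathsLevel consumes one path component per recursion level: literal components are appended directly to every parent, while components containing glob characters trigger a filtered listing. A hand-worked trace for the hypothetical pattern /a/*/c (directory contents made up):

    //   start:           parents = [ / ]
    //   "a" (literal):   parents = [ /a ]
    //   "*" (pattern):   parents = listPaths([/a], filter) -> [ /a/x, /a/y ]
    //   "c" (literal):   parents = [ /a/x/c, /a/y/c ]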
  
-    private static class GlobFilter implements PathFilter {
-      private PathFilter userFilter = DEFAULT_FILTER;
-      private Pattern regex;
-      private boolean hasPattern = false;
+  private static class GlobFilter implements PathFilter {
+    private PathFilter userFilter = DEFAULT_FILTER;
+    private Pattern regex;
+    private boolean hasPattern = false;
       
-      /** Default pattern character: Escape any special meaning. */
-      private static final char  PAT_ESCAPE = '\\';
-      /** Default pattern character: Any single character. */
-      private static final char  PAT_ANY = '.';
-      /** Default pattern character: Character set close. */
-      private static final char  PAT_SET_CLOSE = ']';
+    /** Default pattern character: Escape any special meaning. */
+    private static final char  PAT_ESCAPE = '\\';
+    /** Default pattern character: Any single character. */
+    private static final char  PAT_ANY = '.';
+    /** Default pattern character: Character set close. */
+    private static final char  PAT_SET_CLOSE = ']';
       
-      GlobFilter() {
-      }
+    GlobFilter() {
+    }
       
-      GlobFilter(String filePattern) throws IOException {
-        setRegex(filePattern);
-      }
+    GlobFilter(String filePattern) throws IOException {
+      setRegex(filePattern);
+    }
       
-      GlobFilter(String filePattern, PathFilter filter) throws IOException {
-        userFilter = filter;
-        setRegex(filePattern);
-      }
+    GlobFilter(String filePattern, PathFilter filter) throws IOException {
+      userFilter = filter;
+      setRegex(filePattern);
+    }
       
-      void setRegex(String filePattern) throws IOException {
-        int len;
-        int setOpen;
-        boolean setRange;
-        StringBuffer fileRegex = new StringBuffer();
-
-        // Validate the pattern
-        len = filePattern.length();
-        if (len == 0)
-          return;
-
-        setOpen = 0;
-        setRange = false;
+    void setRegex(String filePattern) throws IOException {
+      int len;
+      int setOpen;
+      boolean setRange;
+      StringBuffer fileRegex = new StringBuffer();
+
+      // Validate the pattern
+      len = filePattern.length();
+      if (len == 0)
+        return;
+
+      setOpen = 0;
+      setRange = false;
         
-        for (int i = 0; i < len; i++) {
-          char pCh;
+      for (int i = 0; i < len; i++) {
+        char pCh;
           
-          // Examine a single pattern character
-          pCh = filePattern.charAt(i);
-          if (pCh == PAT_ESCAPE) {
-            fileRegex.append(pCh);
-            i++;
-            if (i >= len)
-              error("An escaped character does not present", filePattern, i);
-            pCh = filePattern.charAt(i);
-          } else if (pCh == '.') {
-            fileRegex.append(PAT_ESCAPE);
-          } else if (pCh == '*') {
-            fileRegex.append(PAT_ANY);
-            hasPattern = true;
-          } else if (pCh == '?') {
-            pCh = PAT_ANY;
-            hasPattern = true;
-          } else if (pCh == '[' && setOpen == 0) {
-            setOpen++;
-            hasPattern = true;
-          } else if (pCh == '^' && setOpen > 0) {
-          } else if (pCh == '-' && setOpen > 0) {
-            // Character set range
-            setRange = true;
-          } else if (pCh == PAT_SET_CLOSE && setRange) {
-            // Incomplete character set range
-            error("Incomplete character set range", filePattern, i);
-          } else if (pCh == PAT_SET_CLOSE && setOpen > 0) {
-            // End of a character set
-            if (setOpen < 2)
-              error("Unexpected end of set", filePattern, i);
-            setOpen = 0;
-          } else if (setOpen > 0) {
-            // Normal character, or the end of a character set range
-            setOpen++;
-            setRange = false;
-          }
+        // Examine a single pattern character
+        pCh = filePattern.charAt(i);
+        if (pCh == PAT_ESCAPE) {
           fileRegex.append(pCh);
+          i++;
+          if (i >= len)
+            error("An escaped character does not present", filePattern, i);
+          pCh = filePattern.charAt(i);
+        } else if (pCh == '.') {
+          fileRegex.append(PAT_ESCAPE);
+        } else if (pCh == '*') {
+          fileRegex.append(PAT_ANY);
+          hasPattern = true;
+        } else if (pCh == '?') {
+          pCh = PAT_ANY;
+          hasPattern = true;
+        } else if (pCh == '[' && setOpen == 0) {
+          setOpen++;
+          hasPattern = true;
+        } else if (pCh == '^' && setOpen > 0) {
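+          // '^' passes through unchanged: regex uses the same set-negation syntax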
+        } else if (pCh == '-' && setOpen > 0) {
+          // Character set range
+          setRange = true;
+        } else if (pCh == PAT_SET_CLOSE && setRange) {
+          // Incomplete character set range
+          error("Incomplete character set range", filePattern, i);
+        } else if (pCh == PAT_SET_CLOSE && setOpen > 0) {
+          // End of a character set
+          if (setOpen < 2)
+            error("Unexpected end of set", filePattern, i);
+          setOpen = 0;
+        } else if (setOpen > 0) {
+          // Normal character, or the end of a character set range
+          setOpen++;
+          setRange = false;
         }
+        fileRegex.append(pCh);
+      }
         
-        // Check for a well-formed pattern
-        if (setOpen > 0 || setRange) {
-          // Incomplete character set or character range
-          error("Expecting set closure character or end of range", filePattern,
+      // Check for a well-formed pattern
+      if (setOpen > 0 || setRange) {
+        // Incomplete character set or character range
+        error("Expecting set closure character or end of range", filePattern,
               len);
-        }
-        regex = Pattern.compile(fileRegex.toString());
       }
+      regex = Pattern.compile(fileRegex.toString());
+    }
       
-      boolean hasPattern() {
-        return hasPattern;
-      }
+    boolean hasPattern() {
+      return hasPattern;
+    }
       
-      public boolean accept(Path path) {
-        return regex.matcher(path.getName()).matches() && userFilter.accept(path);
-      }
+    public boolean accept(Path path) {
+      return regex.matcher(path.getName()).matches() && userFilter.accept(path);
+    }
       
-      private void error(String s, String pattern, int pos) throws IOException {
-        throw new IOException("Illegal file pattern: "
-                                 +s+ " for glob "+ pattern + " at " + pos);
-      }
+    private void error(String s, String pattern, int pos) throws IOException {
+      throw new IOException("Illegal file pattern: "
+                            +s+ " for glob "+ pattern + " at " + pos);
     }
+  }
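Some hand-traced translations under setRegex's rules, as a sanity check (worked by hand from the code above, not taken from a test suite):

    // glob        ->  compiled regex source
    // "*.txt"     ->  ".*\.txt"     '*' emits ".*"; '.' is escaped to "\."
    // "part-?"    ->  "part-."      '?' becomes the regex wildcard '.'
    // "[a-c]*"    ->  "[a-c].*"     character sets pass through unchanged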
     
-    /**
-     * Set the current working directory for the given file system. All relative
-     * paths will be resolved relative to it.
-     * 
-     * @param new_dir
-     */
-    public abstract void setWorkingDirectory(Path new_dir);
+  /**
+   * Set the current working directory for the given file system. All relative
+   * paths will be resolved relative to it.
+   * 
+   * @param new_dir the new working directory
+   */
+  public abstract void setWorkingDirectory(Path new_dir);
     
-    /**
-     * Get the current working directory for the given file system
-     * @return the directory pathname
-     */
-    public abstract Path getWorkingDirectory();
+  /**
+   * Get the current working directory for the given file system
+   * @return the directory pathname
+   */
+  public abstract Path getWorkingDirectory();
     
-    /**
-     * Make the given file and all non-existent parents into
-     * directories. Has the semantics of Unix 'mkdir -p'.
-     * Existence of the directory hierarchy is not an error.
-     */
-    public abstract boolean mkdirs(Path f) throws IOException;
-
-    /**
-     * Obtain a lock on the given Path
-     * 
-     * @deprecated FS does not support file locks anymore.
-     */
-    @Deprecated
-    public abstract void lock(Path f, boolean shared) throws IOException;
-
-    /**
-     * Release the lock
-     * 
-     * @deprecated FS does not support file locks anymore.     
-     */
-    @Deprecated
-    public abstract void release(Path f) throws IOException;
-
-    /**
-     * The src file is on the local disk.  Add it to FS at
-     * the given dst name and the source is kept intact afterwards
-     */
-    public void copyFromLocalFile(Path src, Path dst)
+  /**
+   * Make the given file and all non-existent parents into
+   * directories. Has the semantics of Unix 'mkdir -p'.
+   * Existence of the directory hierarchy is not an error.
+   */
+  public abstract boolean mkdirs(Path f) throws IOException;
+
+  /**
+   * Obtain a lock on the given Path
+   * 
+   * @deprecated FS does not support file locks anymore.
+   */
+  @Deprecated
+  public abstract void lock(Path f, boolean shared) throws IOException;
+
+  /**
+   * Release the lock
+   * 
+   * @deprecated FS does not support file locks anymore.     
+   */
+  @Deprecated
+  public abstract void release(Path f) throws IOException;
+
+  /**
+   * The src file is on the local disk.  Add it to FS at
+   * the given dst name; the source is kept intact afterwards.
+   */
+  public void copyFromLocalFile(Path src, Path dst)
     throws IOException {
-      copyFromLocalFile(false, src, dst);
-    }
-
-    /**
-     * The src file is on the local disk.  Add it to FS at
-     * the given dst name, removing the source afterwards.
-     */
-    public void moveFromLocalFile(Path src, Path dst)
+    copyFromLocalFile(false, src, dst);
+  }
+
+  /**
+   * The src file is on the local disk.  Add it to FS at
+   * the given dst name, removing the source afterwards.
+   */
+  public void moveFromLocalFile(Path src, Path dst)
     throws IOException {
-      copyFromLocalFile(true, src, dst);
-    }
-
-    /**
-     * The src file is on the local disk.  Add it to FS at
-     * the given dst name.
-     * delSrc indicates if the source should be removed
-     */
-    public abstract void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+    copyFromLocalFile(true, src, dst);
+  }
+
+  /**
+   * The src file is on the local disk.  Add it to FS at
+   * the given dst name.
+   * delSrc indicates if the source should be removed
+   */
+  public abstract void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException;
     
-    /**
-     * The src file is under FS, and the dst is on the local disk.
-     * Copy it from FS control to the local dst name.
-     */
-    public void copyToLocalFile(Path src, Path dst) throws IOException {
-      copyToLocalFile(false, src, dst);
-    }
+  /**
+   * The src file is under FS, and the dst is on the local disk.
+   * Copy it from FS control to the local dst name.
+   */
+  public void copyToLocalFile(Path src, Path dst) throws IOException {
+    copyToLocalFile(false, src, dst);
+  }
     
-    /**
-     * The src file is under FS, and the dst is on the local disk.
-     * Copy it from FS control to the local dst name.
-     * Remove the source afterwards
-     */
-    public void moveToLocalFile(Path src, Path dst) throws IOException {
-      copyToLocalFile(true, src, dst);
-    }
-
-    /**
-     * The src file is under FS, and the dst is on the local disk.
-     * Copy it from FS control to the local dst name.
-     * delSrc indicates if the src will be removed or not.
-     */   
-    public abstract void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  /**
+   * The src file is under FS, and the dst is on the local disk.
+   * Copy it from FS control to the local dst name.
+   * Remove the source afterwards
+   */
+  public void moveToLocalFile(Path src, Path dst) throws IOException {
+    copyToLocalFile(true, src, dst);
+  }
+
+  /**
+   * The src file is under FS, and the dst is on the local disk.
+   * Copy it from FS control to the local dst name.
+   * delSrc indicates if the src will be removed or not.
+   */   
+  public abstract void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException;
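A hedged sketch of the local-copy family in use (file names illustrative; fs assumed to come from FileSystem.get(conf)):

    // stage a local input into the FS, then fetch a result back out
    fs.copyFromLocalFile(new Path("/home/me/input.txt"),
                         new Path("/user/me/input.txt"));
    fs.copyToLocalFile(new Path("/user/me/result.txt"),
                       new Path("/home/me/result.txt"));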
 
-    /**
-     * Returns a local File that the user can write output to.  The caller
-     * provides both the eventual FS target name and the local working
-     * file.  If the FS is local, we write directly into the target.  If
-     * the FS is remote, we write into the tmp local area.
-     */
-    public abstract Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
-
-    /**
-     * Called when we're all done writing to the target.  A local FS will
-     * do nothing, because we've written to exactly the right place.  A remote
-     * FS will copy the contents of tmpLocalFile to the correct target at
-     * fsOutputFile.
-     */
-    public abstract void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
-
-    /**
-     * No more filesystem operations are needed.  Will
-     * release any held locks.
-     */
-    public void close() throws IOException {
-      URI uri = getUri();
-      synchronized (FileSystem.class) {
-        Map<String,FileSystem> authorityToFs = CACHE.get(uri.getScheme());
-        if (authorityToFs != null) {
-          authorityToFs.remove(uri.getAuthority());
-        }
+  /**
+   * Returns a local File that the user can write output to.  The caller
+   * provides both the eventual FS target name and the local working
+   * file.  If the FS is local, we write directly into the target.  If
+   * the FS is remote, we write into the tmp local area.
+   */
+  public abstract Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
+
+  /**
+   * Called when we're all done writing to the target.  A local FS will
+   * do nothing, because we've written to exactly the right place.  A remote
+   * FS will copy the contents of tmpLocalFile to the correct target at
+   * fsOutputFile.
+   */
+  public abstract void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile) throws IOException;
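The pair above forms a small two-phase protocol; a sketch of the intended call sequence (paths illustrative):

    Path target = new Path("/user/me/final.out");
    Path tmp = new Path("/tmp/working.out");
    Path writable = fs.startLocalOutput(target, tmp);
    // ... write writable's contents with ordinary local java.io ...
    fs.completeLocalOutput(target, tmp);   // no-op locally, upload remotely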
+
+  /**
+   * No more filesystem operations are needed.  Will
+   * release any held locks.
+   */
+  public void close() throws IOException {
+    URI uri = getUri();
+    synchronized (FileSystem.class) {
+      Map<String,FileSystem> authorityToFs = CACHE.get(uri.getScheme());
+      if (authorityToFs != null) {
+        authorityToFs.remove(uri.getAuthority());
       }
     }
+  }
 
-    /** Return the total size of all files in the filesystem.*/
-    public long getUsed() throws IOException{
-      long used = 0;
-      Path[] files = listPaths(new Path("/"));
-      for(Path file:files){
-        used += getContentLength(file);
-      }
-      return used;
+  /** Return the total size of all files in the filesystem. */
+  public long getUsed() throws IOException {
+    long used = 0;
+    Path[] files = listPaths(new Path("/"));
+    for (Path file : files) {
+      used += getContentLength(file);
     }
+    return used;
+  }
 
-    /**
-     * Get the block size for a particular file.
-     * @param f the filename
-     * @return the number of bytes in a block
-     */
-    public abstract long getBlockSize(Path f) throws IOException;
+  /**
+   * Get the block size for a particular file.
+   * @param f the filename
+   * @return the number of bytes in a block
+   */
+  public abstract long getBlockSize(Path f) throws IOException;
     
-    /** Return the number of bytes that large input files should be optimally
-     * be split into to minimize i/o time. */
-    public long getDefaultBlockSize() {
-      // default to 32MB: large enough to minimize the impact of seeks
-      return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
-    }
+  /** Return the number of bytes that large input files should optimally
+   * be split into to minimize i/o time. */
+  public long getDefaultBlockSize() {
+    // default to 32MB: large enough to minimize the impact of seeks
+    return getConf().getLong("fs.local.block.size", 32 * 1024 * 1024);
+  }
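The key read above can be overridden per job; a minimal sketch (the 64 MB value is illustrative):

    Configuration conf = new Configuration();
    conf.set("fs.local.block.size", Long.toString(64 * 1024 * 1024));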
     
-    /**
-     * Get the default replication.
-     */
-    public abstract short getDefaultReplication();
+  /**
+   * Get the default replication.
+   */
+  public abstract short getDefaultReplication();
 
 }

+ 21 - 21
src/java/org/apache/hadoop/fs/FileUtil.java

@@ -40,7 +40,7 @@ public class FileUtil {
     if (contents != null) {
       for (int i = 0; i < contents.length; i++) {
         if (contents[i].isFile()) {
-          if (! contents[i].delete()) {
+          if (!contents[i].delete()) {
             return false;
           }
         } else {
@@ -54,7 +54,7 @@ public class FileUtil {
           }
           // if not an empty directory or symlink let
           // fullydelete handle it.
-          if (! fullyDelete(contents[i])) {
+          if (!fullyDelete(contents[i])) {
             return false;
           }
         }
@@ -67,7 +67,7 @@ public class FileUtil {
   public static boolean copy(FileSystem srcFS, Path src, 
                              FileSystem dstFS, Path dst, 
                              boolean deleteSource,
-                             Configuration conf ) throws IOException {
+                             Configuration conf) throws IOException {
     dst = checkDest(src.getName(), dstFS, dst);
 
     if (srcFS.isDirectory(src)) {
@@ -117,7 +117,7 @@ public class FileUtil {
           InputStream in = srcFS.open(contents[i]);
           try {
             copyContent(in, out, conf, false);
-            if(addString!=null)
+            if (addString!=null)
               out.write(addString.getBytes("UTF-8"));
                 
           } finally {
@@ -141,7 +141,7 @@ public class FileUtil {
   public static boolean copy(File src,
                              FileSystem dstFS, Path dst,
                              boolean deleteSource,
-                             Configuration conf ) throws IOException {
+                             Configuration conf) throws IOException {
     dst = checkDest(src.getName(), dstFS, dst);
 
     if (src.isDirectory()) {
@@ -171,7 +171,7 @@ public class FileUtil {
   /** Copy FileSystem files to local files. */
   public static boolean copy(FileSystem srcFS, Path src, 
                              File dst, boolean deleteSource,
-                             Configuration conf ) throws IOException {
+                             Configuration conf) throws IOException {
 
     dst = checkDest(src.getName(), dst);
 
@@ -215,7 +215,7 @@ public class FileUtil {
         bytesRead = in.read(buf);
       }
     } finally {
-      if(close)
+      if (close)
         out.close();
     }
   }
@@ -284,7 +284,7 @@ public class FileUtil {
   public static void unZip(File inFile, File unzipDir) throws IOException {
     Enumeration entries;
     ZipFile zipFile = new ZipFile(inFile);
-    ;
+
     try {
       entries = zipFile.entries();
       while (entries.hasMoreElements()) {
@@ -334,44 +334,44 @@ public class FileUtil {
     private static String[] hardLinkCommand;
     
     static {
-      switch( getOSType() ) {
+      switch(getOSType()) {
       case OS_TYPE_WINXP:
-        hardLinkCommand = new String[] {"fsutil","hardlink","create",null,null};
+        hardLinkCommand = new String[] {"fsutil","hardlink","create", null, null};
         break;
       case OS_TYPE_UNIX:
       default:
-        hardLinkCommand = new String[] {"ln",null,null};
+        hardLinkCommand = new String[] {"ln", null, null};
       }
     }
 
     static OSType getOSType() {
       String osName = System.getProperty("os.name");
-      if( osName.indexOf( "Windows") >= 0 && 
-          (osName.indexOf( "XpP") >= 0 || osName.indexOf( "2003") >= 0 ) )
+      if (osName.indexOf("Windows") >= 0 && 
+          (osName.indexOf("XpP") >= 0 || osName.indexOf("2003") >= 0))
         return OSType.OS_TYPE_WINXP;
       else
         return OSType.OS_TYPE_UNIX;
     }
     
     public static void createHardLink(File target, 
-                                      File linkName ) throws IOException {
+                                      File linkName) throws IOException {
       int len = hardLinkCommand.length;
       hardLinkCommand[len-2] = target.getCanonicalPath();
       hardLinkCommand[len-1] = linkName.getCanonicalPath();
       // execute shell command
-      Process process = Runtime.getRuntime().exec( hardLinkCommand );
+      Process process = Runtime.getRuntime().exec(hardLinkCommand);
       try {
         if (process.waitFor() != 0) {
           String errMsg = new BufferedReader(new InputStreamReader(
                                                                    process.getInputStream())).readLine();
-          if( errMsg == null )  errMsg = "";
+          if (errMsg == null)  errMsg = "";
           String inpMsg = new BufferedReader(new InputStreamReader(
                                                                    process.getErrorStream())).readLine();
-          if( inpMsg == null )  inpMsg = "";
-          throw new IOException( errMsg + inpMsg );
+          if (inpMsg == null)  inpMsg = "";
+          throw new IOException(errMsg + inpMsg);
         }
       } catch (InterruptedException e) {
-        throw new IOException( StringUtils.stringifyException( e ));
+        throw new IOException(StringUtils.stringifyException(e));
       } finally {
         process.destroy();
       }
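A usage sketch, assuming the enclosing nested class is the usual FileUtil.HardLink (paths illustrative):

    File target = new File("/data/blk_1234");
    File link = new File("/data/current/blk_1234");
    FileUtil.HardLink.createHardLink(target, link);   // runs "ln" or "fsutil"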
@@ -387,7 +387,7 @@ public class FileUtil {
    */
   public static int symLink(String target, String linkname) throws IOException{
     String cmd = "ln -s " + target + " " + linkname;
-    Process p = Runtime.getRuntime().exec( cmd, null );
+    Process p = Runtime.getRuntime().exec(cmd, null);
     int returnVal = -1;
     try{
       returnVal = p.waitFor();
@@ -408,7 +408,7 @@ public class FileUtil {
   public static int chmod(String filename, String perm
                           ) throws IOException, InterruptedException {
     String cmd = "chmod " + perm + " " + filename;
-    Process p = Runtime.getRuntime().exec( cmd, null );
+    Process p = Runtime.getRuntime().exec(cmd, null);
     return p.waitFor();
   }
 }

+ 11 - 11
src/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -44,7 +44,7 @@ public class FilterFileSystem extends FileSystem {
   
   protected FileSystem fs;
   
-  public FilterFileSystem( FileSystem fs) {
+  public FilterFileSystem(FileSystem fs) {
     this.fs = fs;
   }
 
@@ -114,13 +114,13 @@ public class FilterFileSystem extends FileSystem {
    * @param bufferSize the size of the buffer to be used.
    * @param replication required block replication for the file. 
    */
-  public FSDataOutputStream create( Path f, 
-                                    boolean overwrite,
-                                    int bufferSize,
-                                    short replication,
-                                    long blockSize,
-                                    Progressable progress
-                                    ) throws IOException {
+  public FSDataOutputStream create(Path f, 
+                                   boolean overwrite,
+                                   int bufferSize,
+                                   short replication,
+                                   long blockSize,
+                                   Progressable progress
+                                   ) throws IOException {
     return fs.create(f, overwrite, bufferSize, replication, blockSize, progress);
   }
 
@@ -217,7 +217,7 @@ public class FilterFileSystem extends FileSystem {
    * @deprecated FS does not support file locks anymore.
    */
   @Deprecated
-    public void lock(Path f, boolean shared) throws IOException {
+  public void lock(Path f, boolean shared) throws IOException {
     fs.lock(f, shared);
   }
 
@@ -227,7 +227,7 @@ public class FilterFileSystem extends FileSystem {
    * @deprecated FS does not support file locks anymore.     
    */
   @Deprecated
-    public void release(Path f) throws IOException {
+  public void release(Path f) throws IOException {
     fs.release(f);
   }
 
@@ -296,7 +296,7 @@ public class FilterFileSystem extends FileSystem {
   }
 
   @Override
-    public Configuration getConf() {
+  public Configuration getConf() {
     return fs.getConf();
   }
 }

+ 54 - 54
src/java/org/apache/hadoop/fs/FsShell.java

@@ -121,7 +121,7 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem.globPaths 
    */
   void copyToLocal(String[]argv, int pos) throws IOException {
-    if(argv.length-pos<2 || (argv.length-pos==2 && argv[pos].equalsIgnoreCase("-crc"))) {
+    if (argv.length-pos<2 || (argv.length-pos==2 && argv[pos].equalsIgnoreCase("-crc"))) {
       System.err.println("Usage: -get [-crc] <src> <dst>");
       System.exit(-1);
     }
@@ -132,19 +132,19 @@ public class FsShell extends ToolBase {
     }
     String srcf = argv[pos++];
     String dstf = argv[pos++];
-    if( dstf.equals("-")) {
+    if (dstf.equals("-")) {
       if (copyCrc) {
         System.err.println("-crc option is not valid when destination is stdout.");
       }
       cat(srcf);
     } else {
-      Path [] srcs = fs.globPaths( new Path(srcf) );
-      if( srcs.length > 1 && !new File( dstf ).isDirectory()) {
-        throw new IOException( "When copying multiple files, " 
-                               + "destination should be a directory." );
+      Path [] srcs = fs.globPaths(new Path(srcf));
+      if (srcs.length > 1 && !new File(dstf).isDirectory()) {
+        throw new IOException("When copying multiple files, " 
+                              + "destination should be a directory.");
       }
-      Path dst = new Path( dstf );
-      for( int i=0; i<srcs.length; i++ ) {
+      Path dst = new Path(dstf);
+      for(int i=0; i<srcs.length; i++) {
         ((DistributedFileSystem)fs).copyToLocalFile(srcs[i], dst, copyCrc);
       }
     }
@@ -178,9 +178,9 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem.globPaths 
    */
   void copyMergeToLocal(String srcf, Path dst, boolean endline) throws IOException {
-    Path [] srcs = fs.globPaths( new Path( srcf ) );
-    for( int i=0; i<srcs.length; i++ ) {
-      if(endline) {
+    Path [] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
+      if (endline) {
         FileUtil.copyMerge(fs, srcs[i], 
                            FileSystem.getLocal(conf), dst, false, conf, "\n");
       } else {
@@ -206,8 +206,8 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem.globPaths 
    */
   void cat(String srcf) throws IOException {
-    Path [] srcs = fs.globPaths( new Path( srcf ) );
-    for( int i=0; i<srcs.length; i++ ) {
+    Path [] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
       printToStdout(srcs[i]);
     }
   }
@@ -219,7 +219,7 @@ public class FsShell extends ToolBase {
    * @throws IOException 
    */
   private void setReplication(String[] cmd, int pos) throws IOException {
-    if(cmd.length-pos<2 || (cmd.length-pos==2 && cmd[pos].equalsIgnoreCase("-R"))) {
+    if (cmd.length-pos<2 || (cmd.length-pos==2 && cmd[pos].equalsIgnoreCase("-R"))) {
       System.err.println("Usage: [-R] <repvalue> <path>");
       System.exit(-1);
     }
@@ -227,7 +227,7 @@ public class FsShell extends ToolBase {
     boolean recursive = false;
     short rep = 3;
       
-    if("-R".equalsIgnoreCase(cmd[pos])) {
+    if ("-R".equalsIgnoreCase(cmd[pos])) {
       recursive=true;
       pos++;
         
@@ -256,16 +256,16 @@ public class FsShell extends ToolBase {
    */
   public void setReplication(short newRep, String srcf, boolean recursive)
     throws IOException {
-    Path[] srcs = fs.globPaths( new Path(srcf) );
-    for( int i=0; i<srcs.length; i++ ) {
-      setReplication( newRep, srcs[i], recursive );
+    Path[] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
+      setReplication(newRep, srcs[i], recursive);
     }
   }
     
   private void setReplication(short newRep, Path src, boolean recursive)
     throws IOException {
   	
-    if(!fs.isDirectory(src)) {
+    if (!fs.isDirectory(src)) {
       setFileReplication(src, newRep);
       return;
     }
@@ -277,9 +277,9 @@ public class FsShell extends ToolBase {
 
       for (int i = 0; i < items.length; i++) {
         Path cur = items[i];
-        if(!fs.isDirectory(cur)) {
+        if (!fs.isDirectory(cur)) {
           setFileReplication(cur, newRep);
-        } else if(recursive) {
+        } else if (recursive) {
           setReplication(newRep, cur, recursive);
         }
       }
@@ -295,7 +295,7 @@ public class FsShell extends ToolBase {
    */
   private void setFileReplication(Path file, short newRep) throws IOException {
     	
-    if(fs.setReplication(file, newRep)) {
+    if (fs.setReplication(file, newRep)) {
       System.out.println("Replication " + newRep + " set: " + file);
     } else {
       System.err.println("Could not set replication for: " + file);
@@ -311,7 +311,7 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
    */
   public void ls(String srcf, boolean recursive) throws IOException {
-    Path[] srcs = fs.globPaths( new Path(srcf) );
+    Path[] srcs = fs.globPaths(new Path(srcf));
     boolean printHeader = (srcs.length == 1) ? true: false;
     for(int i=0; i<srcs.length; i++) {
       ls(srcs[i], recursive, printHeader);
@@ -319,12 +319,12 @@ public class FsShell extends ToolBase {
   }
 
   /* list all files under the directory <i>src</i>*/
-  private void ls(Path src, boolean recursive, boolean printHeader ) throws IOException {
+  private void ls(Path src, boolean recursive, boolean printHeader) throws IOException {
     Path items[] = fs.listPaths(src);
     if (items == null) {
       throw new IOException("Could not get listing for " + src);
     } else {
-      if(!recursive && printHeader ) {
+      if (!recursive && printHeader) {
         System.out.println("Found " + items.length + " items");
       }
       for (int i = 0; i < items.length; i++) {
@@ -334,7 +334,7 @@ public class FsShell extends ToolBase {
                               "<dir>" : 
                               ("<r " + fs.getReplication(cur) 
                                + ">\t" + fs.getLength(cur))));
-        if(recursive && fs.isDirectory(cur)) {
+        if (recursive && fs.isDirectory(cur)) {
           ls(cur, recursive, printHeader);
         }
       }
@@ -348,7 +348,7 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
    */
   public void du(String src) throws IOException {
-    Path items[] = fs.listPaths( fs.globPaths( new Path(src) ) );
+    Path items[] = fs.listPaths(fs.globPaths(new Path(src)));
     if (items == null) {
       throw new IOException("Could not get listing for " + src);
     } else {
@@ -368,12 +368,12 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
    */
   public void dus(String src) throws IOException {
-    Path paths[] = fs.globPaths( new Path(src) );
-    if( paths==null && paths.length==0 ) {
-      throw new IOException( "dus: No match: " + src );
+    Path paths[] = fs.globPaths(new Path(src));
+    if (paths==null && paths.length==0) {
+      throw new IOException("dus: No match: " + src);
     }
     for(int i=0; i<paths.length; i++) {
-      Path items[] = fs.listPaths( paths[i] );
+      Path items[] = fs.listPaths(paths[i]);
       if (items != null) {
         long totalSize=0;
         for(int j=0; j<items.length; j++) {
@@ -407,13 +407,13 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
    */
   public void rename(String srcf, String dstf) throws IOException {
-    Path [] srcs = fs.globPaths( new Path(srcf) );
+    Path [] srcs = fs.globPaths(new Path(srcf));
     Path dst = new Path(dstf);
-    if( srcs.length > 1 && !fs.isDirectory(dst)) {
-      throw new IOException( "When moving multiple files, " 
-                             + "destination should be a directory." );
+    if (srcs.length > 1 && !fs.isDirectory(dst)) {
+      throw new IOException("When moving multiple files, " 
+                            + "destination should be a directory.");
     }
-    for( int i=0; i<srcs.length; i++ ) {
+    for(int i=0; i<srcs.length; i++) {
       if (fs.rename(srcs[i], dst)) {
         System.out.println("Renamed " + srcs[i] + " to " + dstf);
       } else {
@@ -442,8 +442,8 @@ public class FsShell extends ToolBase {
     if (argv.length > 3) {
       Path dst = new Path(dest);
       if (!fs.isDirectory(dst)) {
-        throw new IOException( "When moving multiple files, " 
-                               + "destination " + dest + " should be a directory." );
+        throw new IOException("When moving multiple files, " 
+                              + "destination " + dest + " should be a directory.");
       }
     }
     //
@@ -493,13 +493,13 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
    */
   public void copy(String srcf, String dstf, Configuration conf) throws IOException {
-    Path [] srcs = fs.globPaths( new Path(srcf) );
+    Path [] srcs = fs.globPaths(new Path(srcf));
     Path dst = new Path(dstf);
-    if( srcs.length > 1 && !fs.isDirectory(dst)) {
-      throw new IOException( "When copying multiple files, " 
-                             + "destination should be a directory." );
+    if (srcs.length > 1 && !fs.isDirectory(dst)) {
+      throw new IOException("When copying multiple files, " 
+                            + "destination should be a directory.");
     }
-    for( int i=0; i<srcs.length; i++ ) {
+    for(int i=0; i<srcs.length; i++) {
       FileUtil.copy(fs, srcs[i], fs, dst, false, conf);
     }
   }
@@ -524,8 +524,8 @@ public class FsShell extends ToolBase {
     if (argv.length > 3) {
       Path dst = new Path(dest);
       if (!fs.isDirectory(dst)) {
-        throw new IOException( "When copying multiple files, " 
-                               + "destination " + dest + " should be a directory." );
+        throw new IOException("When copying multiple files, " 
+                              + "destination " + dest + " should be a directory.");
       }
     }
     //
@@ -572,14 +572,14 @@ public class FsShell extends ToolBase {
    * @see org.apache.hadoop.fs.FileSystem#globPaths(Path)
    */
   public void delete(String srcf, boolean recursive) throws IOException {
-    Path [] srcs = fs.globPaths( new Path(srcf) );
-    for( int i=0; i<srcs.length; i++ ) {
+    Path [] srcs = fs.globPaths(new Path(srcf));
+    for(int i=0; i<srcs.length; i++) {
       delete(srcs[i], recursive);
     }
   }
     
   /* delete a file */
-  private void delete(Path src, boolean recursive ) throws IOException {
+  private void delete(Path src, boolean recursive) throws IOException {
     if (fs.isDirectory(src) && !recursive) {
       throw new IOException("Cannot remove directory \"" + src +
                             "\", use -rmr instead");
@@ -613,7 +613,7 @@ public class FsShell extends ToolBase {
     } else if (len < 1024 * 1024 * 1024) {
       val = (1.0 * len) / (1024 * 1024);
       ending = " MB";
-    } else if (len < 128L * 1024 * 1024 * 1024 ) {
+    } else if (len < 128L * 1024 * 1024 * 1024) {
       val = (1.0 * len) / (1024 * 1024 * 1024);
       ending = " GB";
     } else if (len < 1024L * 1024 * 1024 * 1024 * 1024) {
@@ -915,7 +915,7 @@ public class FsShell extends ToolBase {
       System.err.println("           [-fs <local | file system URI>]");
       System.err.println("           [-conf <configuration file>]");
       System.err.println("           [-D <[property=value>]");
-      System.err.println("           [-ls <path>]" );
+      System.err.println("           [-ls <path>]");
       System.err.println("           [-lsr <path>]");
       System.err.println("           [-du <path>]");
       System.err.println("           [-dus <path>]");
@@ -941,7 +941,7 @@ public class FsShell extends ToolBase {
   /**
    * run
    */
-  public int run( String argv[] ) throws Exception {
+  public int run(String argv[]) throws Exception {
 
     if (argv.length < 1) {
       printUsage(""); 
@@ -1001,7 +1001,7 @@ public class FsShell extends ToolBase {
       } else if ("-get".equals(cmd) || "-copyToLocal".equals(cmd)) {
         copyToLocal(argv, i);
       } else if ("-getmerge".equals(cmd)) {
-        if(argv.length>i+2)
+        if (argv.length>i+2)
           copyMergeToLocal(argv[i++], new Path(argv[i++]), Boolean.parseBoolean(argv[i++]));
         else
           copyMergeToLocal(argv[i++], new Path(argv[i++]));
@@ -1039,7 +1039,7 @@ public class FsShell extends ToolBase {
         } else {
           du("");
         }
-      } else if( "-dus".equals(cmd)) {
+      } else if ("-dus".equals(cmd)) {
         if (i < argv.length) {
           exitCode = doall(cmd, argv, conf, i);
         } else {
@@ -1072,7 +1072,7 @@ public class FsShell extends ToolBase {
         System.err.println(cmd.substring(1) + ": " + 
                            ex.getLocalizedMessage());  
       }
-    } catch (IOException e ) {
+    } catch (IOException e) {
       //
       // IO exception encountered locally.
       // 

+ 2 - 2
src/java/org/apache/hadoop/fs/InMemoryFileSystem.java

@@ -89,7 +89,7 @@ public class InMemoryFileSystem extends ChecksumFileSystem {
      */
     public String[][] getFileCacheHints(Path f, long start, long len)
       throws IOException {
-      if (! exists(f)) {
+      if (!exists(f)) {
         return null;
       } else {
         return new String[][] {{"inmemory"}};
@@ -194,7 +194,7 @@ public class InMemoryFileSystem extends ChecksumFileSystem {
                                      short replication, long blockSize, Progressable progress)
       throws IOException {
       synchronized (this) {
-        if (exists(f) && ! overwrite) {
+        if (exists(f) && !overwrite) {
           throw new IOException("File already exists:"+f);
         }
         FileAttributes fAttr =(FileAttributes) tempFileAttribs.remove(getPath(f));

+ 4 - 4
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -34,7 +34,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
     super(new RawLocalFileSystem());
   }
     
-  public LocalFileSystem( FileSystem rawLocalFileSystem ) {
+  public LocalFileSystem(FileSystem rawLocalFileSystem) {
     super(rawLocalFileSystem);
   }
     
@@ -44,13 +44,13 @@ public class LocalFileSystem extends ChecksumFileSystem {
   }
 
   @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileUtil.copy(this, src, this, dst, delSrc, getConf());
   }
 
   @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileUtil.copy(this, src, this, dst, delSrc, getConf());
   }
@@ -88,7 +88,7 @@ public class LocalFileSystem extends ChecksumFileSystem {
         }
       }
       String suffix = "." + new Random().nextInt();
-      File badFile = new File(badDir,f.getName()+suffix);
+      File badFile = new File(badDir, f.getName()+suffix);
       LOG.warn("Moving bad file " + f + " to " + badFile);
       in.close();                               // close it first
       f.renameTo(badFile);                      // rename it

+ 2 - 2
src/java/org/apache/hadoop/fs/Path.java

@@ -176,7 +176,7 @@ public class Path implements Comparable {
   public Path getParent() {
     String path = uri.getPath();
     int lastSlash = path.lastIndexOf('/');
-    int start = hasWindowsDrive(path,true) ? 3 : 0;
+    int start = hasWindowsDrive(path, true) ? 3 : 0;
     if ((path.length() == start) ||               // empty path
         (lastSlash == start && path.length() == start+1)) { // at root
       return null;
@@ -186,7 +186,7 @@ public class Path implements Comparable {
       parent = "";
     } else {
       int end = hasWindowsDrive(path, true) ? 3 : 0;
-      parent = path.substring(0,lastSlash==end?end+1:lastSlash);
+      parent = path.substring(0, lastSlash==end?end+1:lastSlash);
     }
     return new Path(uri.getScheme(), uri.getAuthority(), parent);
   }

+ 11 - 11
src/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -60,7 +60,7 @@ public class RawLocalFileSystem extends FileSystem {
    * Return null if otherwise.
    */
   public String[][] getFileCacheHints(Path f, long start, long len) throws IOException {
-    if (! exists(f)) {
+    if (!exists(f)) {
       return null;
     } else {
       String result[][] = new String[1][];
@@ -183,7 +183,7 @@ public class RawLocalFileSystem extends FileSystem {
   public FSDataOutputStream create(Path f, boolean overwrite, int bufferSize,
                                    short replication, long blockSize, Progressable progress)
     throws IOException {
-    if (exists(f) && ! overwrite) {
+    if (exists(f) && !overwrite) {
       throw new IOException("File already exists:"+f);
     }
     Path parent = f.getParent();
@@ -201,9 +201,9 @@ public class RawLocalFileSystem extends FileSystem {
   }
   
   /** Set the replication of the given file */
-  public boolean setReplication( Path src,
-                                 short replication
-                                 ) throws IOException {
+  public boolean setReplication(Path src,
+                                short replication
+                                ) throws IOException {
     return true;
   }
   
@@ -236,9 +236,9 @@ public class RawLocalFileSystem extends FileSystem {
     File localf = pathToFile(f);
     Path[] results;
     
-    if(!localf.exists())
+    if (!localf.exists())
       return null;
-    else if(localf.isFile()) {
+    else if (localf.isFile()) {
       results = new Path[1];
       results[0] = f;
       return results;
@@ -270,12 +270,12 @@ public class RawLocalFileSystem extends FileSystem {
    * Set the working directory to the given directory.
    */
   @Override
-    public void setWorkingDirectory(Path newDir) {
+  public void setWorkingDirectory(Path newDir) {
     workingDir = newDir;
   }
   
   @Override
-    public Path getWorkingDirectory() {
+  public Path getWorkingDirectory() {
     return workingDir;
   }
   
@@ -337,13 +337,13 @@ public class RawLocalFileSystem extends FileSystem {
   }
   
   @Override
-    public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyFromLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileUtil.copy(this, src, this, dst, delSrc, getConf());
   }
   
   @Override
-    public void copyToLocalFile(boolean delSrc, Path src, Path dst)
+  public void copyToLocalFile(boolean delSrc, Path src, Path dst)
     throws IOException {
     FileUtil.copy(this, src, this, dst, delSrc, getConf());
   }

+ 21 - 21
src/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -64,21 +64,21 @@ public class S3FileSystem extends FileSystem {
     FileSystemStore store = new Jets3tFileSystemStore();
     
     RetryPolicy basePolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        conf.getInt("fs.s3.maxRetries", 4),
-        conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
+                                                                               conf.getInt("fs.s3.maxRetries", 4),
+                                                                               conf.getLong("fs.s3.sleepTimeSeconds", 10), TimeUnit.SECONDS);
     Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
       new HashMap<Class<? extends Exception>, RetryPolicy>();
     exceptionToPolicyMap.put(IOException.class, basePolicy);
     exceptionToPolicyMap.put(S3Exception.class, basePolicy);
     
     RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+                                                              RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
     Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
     methodNameToPolicyMap.put("storeBlock", methodPolicy);
     methodNameToPolicyMap.put("retrieveBlock", methodPolicy);
     
     return (FileSystemStore) RetryProxy.create(FileSystemStore.class,
-        store, methodNameToPolicyMap);
+                                               store, methodNameToPolicyMap);
   }
   
   @Override
@@ -116,7 +116,7 @@ public class S3FileSystem extends FileSystem {
       store.storeINode(absolutePath, INode.DIRECTORY_INODE);
     } else if (inode.isFile()) {
       throw new IOException(String.format(
-          "Can't make directory for path %s since it is a file.", absolutePath));
+                                          "Can't make directory for path %s since it is a file.", absolutePath));
     }
     Path parent = absolutePath.getParent();
     return (parent == null || mkdirs(parent));
@@ -167,8 +167,8 @@ public class S3FileSystem extends FileSystem {
 
   @Override
   public FSDataOutputStream create(Path file, boolean overwrite, int bufferSize,
-      short replication, long blockSize, Progressable progress)
-      throws IOException {
+                                   short replication, long blockSize, Progressable progress)
+    throws IOException {
 
     INode inode = store.retrieveINode(makeAbsolute(file));
     if (inode != null) {
@@ -185,16 +185,16 @@ public class S3FileSystem extends FileSystem {
         }
       }      
     }
-    return new FSDataOutputStream( 
-            new S3OutputStream(getConf(), store, makeAbsolute(file),
-                blockSize, progress), bufferSize );
+    return new FSDataOutputStream(
+                                  new S3OutputStream(getConf(), store, makeAbsolute(file),
+                                                     blockSize, progress), bufferSize);
   }
 
   @Override
   public FSDataInputStream open(Path path, int bufferSize) throws IOException {
     INode inode = checkFile(path);
-    return new FSDataInputStream( new S3InputStream(getConf(), store, inode),
-            bufferSize);
+    return new FSDataInputStream(new S3InputStream(getConf(), store, inode),
+                                 bufferSize);
   }
 
   @Override
@@ -262,7 +262,7 @@ public class S3FileSystem extends FileSystem {
         return false;
       }
       for (Path p : contents) {
-        if (! delete(p)) {
+        if (!delete(p)) {
           return false;
         }
       }
@@ -301,7 +301,7 @@ public class S3FileSystem extends FileSystem {
    */
   @Override
   public boolean setReplication(Path path, short replication)
-      throws IOException {
+    throws IOException {
     return true;
   }
 
@@ -328,7 +328,7 @@ public class S3FileSystem extends FileSystem {
    */
   @Override
   public String[][] getFileCacheHints(Path f, long start, long len)
-      throws IOException {
+    throws IOException {
     // TODO: Check this is the correct behavior
     if (!exists(f)) {
       return null;
@@ -337,14 +337,14 @@ public class S3FileSystem extends FileSystem {
   }
 
   /** @deprecated */ @Deprecated
-  @Override
-  public void lock(Path path, boolean shared) throws IOException {
+    @Override
+    public void lock(Path path, boolean shared) throws IOException {
     // TODO: Design and implement
   }
 
   /** @deprecated */ @Deprecated
-  @Override
-  public void release(Path path) throws IOException {
+    @Override
+    public void release(Path path) throws IOException {
     // TODO: Design and implement
   }
 
@@ -360,13 +360,13 @@ public class S3FileSystem extends FileSystem {
 
   @Override
   public Path startLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    throws IOException {
     return tmpLocalFile;
   }
 
   @Override
   public void completeLocalOutput(Path fsOutputFile, Path tmpLocalFile)
-      throws IOException {
+    throws IOException {
     moveFromLocalFile(tmpLocalFile, fsOutputFile);
   }
 

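A note on the retry wiring in the first S3FileSystem hunk above: a base policy (up to 4 attempts with a fixed 10-second sleep) is mapped onto IOException and S3Exception, bound to the storeBlock/retrieveBlock method names, and applied through a dynamic proxy. Below is a minimal self-contained sketch of that pattern, using only the RetryPolicies/RetryProxy calls that appear in the hunk; MyStore and doStore are hypothetical names.

  import java.io.IOException;
  import java.util.HashMap;
  import java.util.Map;
  import java.util.concurrent.TimeUnit;
  import org.apache.hadoop.io.retry.RetryPolicies;
  import org.apache.hadoop.io.retry.RetryPolicy;
  import org.apache.hadoop.io.retry.RetryProxy;

  interface MyStore {                      // hypothetical service interface
    void doStore() throws IOException;
  }

  class RetryWiring {
    static MyStore wrap(MyStore store) {
      // Base policy: retry up to 4 times, sleeping 10 seconds per attempt.
      RetryPolicy base = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
          4, 10, TimeUnit.SECONDS);
      // Route IOException to the base policy; anything else fails at once.
      Map<Class<? extends Exception>, RetryPolicy> byException =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
      byException.put(IOException.class, base);
      RetryPolicy methodPolicy = RetryPolicies.retryByException(
          RetryPolicies.TRY_ONCE_THEN_FAIL, byException);
      // Bind the policy to a method name, then wrap the instance in a proxy.
      Map<String, RetryPolicy> byMethod = new HashMap<String, RetryPolicy>();
      byMethod.put("doStore", methodPolicy);
      return (MyStore) RetryProxy.create(MyStore.class, store, byMethod);
    }
  }
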
+ 46 - 46
src/java/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -37,65 +37,65 @@ class S3InputStream extends FSInputStream {
   }
 
   @Override
-    public synchronized long getPos() throws IOException {
-      return pos;
-    }
+  public synchronized long getPos() throws IOException {
+    return pos;
+  }
 
   @Override
-    public synchronized int available() throws IOException {
-      return (int) (fileLength - pos);
-    }
+  public synchronized int available() throws IOException {
+    return (int) (fileLength - pos);
+  }
 
   @Override
-    public synchronized void seek(long targetPos) throws IOException {
-      if (targetPos > fileLength) {
-        throw new IOException("Cannot seek after EOF");
-      }
-      pos = targetPos;
-      blockEnd = -1;
+  public synchronized void seek(long targetPos) throws IOException {
+    if (targetPos > fileLength) {
+      throw new IOException("Cannot seek after EOF");
     }
+    pos = targetPos;
+    blockEnd = -1;
+  }
 
   @Override
-    public synchronized boolean seekToNewSource(long targetPos) throws IOException {
-      return false;
-    }
+  public synchronized boolean seekToNewSource(long targetPos) throws IOException {
+    return false;
+  }
 
   @Override
-    public synchronized int read() throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
+  public synchronized int read() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+    int result = -1;
+    if (pos < fileLength) {
+      if (pos > blockEnd) {
+        blockSeekTo(pos);
       }
-      int result = -1;
-      if (pos < fileLength) {
-        if (pos > blockEnd) {
-          blockSeekTo(pos);
-        }
-        result = blockStream.read();
-        if (result >= 0) {
-          pos++;
-        }
+      result = blockStream.read();
+      if (result >= 0) {
+        pos++;
       }
-      return result;
     }
+    return result;
+  }
 
   @Override
-    public synchronized int read(byte buf[], int off, int len) throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
+  public synchronized int read(byte buf[], int off, int len) throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+    if (pos < fileLength) {
+      if (pos > blockEnd) {
+        blockSeekTo(pos);
       }
-      if (pos < fileLength) {
-        if (pos > blockEnd) {
-          blockSeekTo(pos);
-        }
-        int realLen = Math.min(len, (int) (blockEnd - pos + 1));
-        int result = blockStream.read(buf, off, realLen);
-        if (result >= 0) {
-          pos += result;
-        }
-        return result;
+      int realLen = Math.min(len, (int) (blockEnd - pos + 1));
+      int result = blockStream.read(buf, off, realLen);
+      if (result >= 0) {
+        pos += result;
       }
-      return -1;
+      return result;
     }
+    return -1;
+  }
 
   private synchronized void blockSeekTo(long target) throws IOException {
     //
@@ -132,7 +132,7 @@ class S3InputStream extends FSInputStream {
   }
 
   @Override
-    public void close() throws IOException {
+  public void close() throws IOException {
     if (closed) {
       throw new IOException("Stream closed");
     }
@@ -151,17 +151,17 @@ class S3InputStream extends FSInputStream {
    * We don't support marks.
    */
   @Override
-    public boolean markSupported() {
+  public boolean markSupported() {
     return false;
   }
 
   @Override
-    public void mark(int readLimit) {
+  public void mark(int readLimit) {
     // Do nothing
   }
 
   @Override
-    public void reset() throws IOException {
+  public void reset() throws IOException {
     throw new IOException("Mark not supported");
   }
 

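The re-indented read(byte[], int, int) above follows a block-clamped pattern: seek lazily to the block containing pos, never read past blockEnd, and advance pos by whatever the block stream returned. A paraphrased sketch (field and method names follow the diff; this is not the committed code, and the closed-stream check is omitted for brevity):

  int read(byte[] buf, int off, int len) throws IOException {
    if (pos >= fileLength) return -1;         // at or past EOF
    if (pos > blockEnd) blockSeekTo(pos);     // lazily open the right block
    int realLen = Math.min(len, (int) (blockEnd - pos + 1)); // stay in block
    int result = blockStream.read(buf, off, realLen);
    if (result >= 0) pos += result;           // advance logical position
    return result;
  }
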
+ 49 - 49
src/java/org/apache/hadoop/fs/s3/S3OutputStream.java

@@ -74,52 +74,52 @@ class S3OutputStream extends OutputStream {
   }
 
   @Override
-    public synchronized void write(int b) throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
+  public synchronized void write(int b) throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
 
-      if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
-        flush();
-      }
-      outBuf[pos++] = (byte) b;
-      filePos++;
+    if ((bytesWrittenToBlock + pos == blockSize) || (pos >= bufferSize)) {
+      flush();
     }
+    outBuf[pos++] = (byte) b;
+    filePos++;
+  }
 
   @Override
-    public synchronized void write(byte b[], int off, int len) throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
-      while (len > 0) {
-        int remaining = bufferSize - pos;
-        int toWrite = Math.min(remaining, len);
-        System.arraycopy(b, off, outBuf, pos, toWrite);
-        pos += toWrite;
-        off += toWrite;
-        len -= toWrite;
-        filePos += toWrite;
-
-        if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
-          flush();
-        }
+  public synchronized void write(byte b[], int off, int len) throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
+    while (len > 0) {
+      int remaining = bufferSize - pos;
+      int toWrite = Math.min(remaining, len);
+      System.arraycopy(b, off, outBuf, pos, toWrite);
+      pos += toWrite;
+      off += toWrite;
+      len -= toWrite;
+      filePos += toWrite;
+
+      if ((bytesWrittenToBlock + pos >= blockSize) || (pos == bufferSize)) {
+        flush();
       }
     }
+  }
 
   @Override
-    public synchronized void flush() throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
+  public synchronized void flush() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
 
-      if (bytesWrittenToBlock + pos >= blockSize) {
-        flushData((int) blockSize - bytesWrittenToBlock);
-      }
-      if (bytesWrittenToBlock == blockSize) {
-        endBlock();
-      }
-      flushData(pos);
+    if (bytesWrittenToBlock + pos >= blockSize) {
+      flushData((int) blockSize - bytesWrittenToBlock);
+    }
+    if (bytesWrittenToBlock == blockSize) {
+      endBlock();
     }
+    flushData(pos);
+  }
 
   private synchronized void flushData(int maxPos) throws IOException {
     int workingPos = Math.min(pos, maxPos);
@@ -179,22 +179,22 @@ class S3OutputStream extends OutputStream {
   }
 
   @Override
-    public synchronized void close() throws IOException {
-      if (closed) {
-        throw new IOException("Stream closed");
-      }
+  public synchronized void close() throws IOException {
+    if (closed) {
+      throw new IOException("Stream closed");
+    }
 
-      flush();
-      if (filePos == 0 || bytesWrittenToBlock != 0) {
-        endBlock();
-      }
+    flush();
+    if (filePos == 0 || bytesWrittenToBlock != 0) {
+      endBlock();
+    }
 
-      backupStream.close();
-      backupFile.delete();
+    backupStream.close();
+    backupFile.delete();
 
-      super.close();
+    super.close();
 
-      closed = true;
-    }
+    closed = true;
+  }
 
 }

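For the write path above, a short worked trace may help (illustrative sizes, and assuming endBlock() ships the finished block and resets bytesWrittenToBlock, per its use in flush() and close()). With bufferSize = 4096 and blockSize = 8192, writing 10000 bytes proceeds as: the first 4096 bytes fill outBuf, pos == bufferSize triggers flush(), and flushData(pos) spills them to the backup file (bytesWrittenToBlock = 4096); the next 4096 bytes make bytesWrittenToBlock + pos reach 8192, so flush() writes the remaining buffered bytes via flushData(blockSize - bytesWrittenToBlock), completing the block, and endBlock() closes it out; the final 1808 bytes simply sit in outBuf until the next flush() or close().
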
+ 1 - 1
src/java/org/apache/hadoop/io/BytesWritable.java

@@ -197,7 +197,7 @@ public class BytesWritable implements WritableComparable {
                        byte[] b2, int s2, int l2) {
       int size1 = readInt(b1, s1);
       int size2 = readInt(b2, s2);
-      return compareBytes(b1,s1+4, size1, b2, s2+4, size2);
+      return compareBytes(b1, s1+4, size1, b2, s2+4, size2);
     }
   }
   

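The raw comparator above relies on the serialized layout implied by the s+4 offsets: a 4-byte big-endian length followed by the payload bytes. For example (illustrative), a 3-byte value {0x01, 0x02, 0x03} serializes as 00 00 00 03 01 02 03, so compareBytes(b1, s1+4, size1, b2, s2+4, size2) compares the payloads directly, with the decoded sizes settling comparisons between prefixes of different length.
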
+ 1 - 1
src/java/org/apache/hadoop/io/GenericWritable.java

@@ -70,7 +70,7 @@ public abstract class GenericWritable implements Writable {
       }
     }
     throw new RuntimeException("The type of instance is: "
-                + instance.getClass() + ", which is NOT registered.");
+                               + instance.getClass() + ", which is NOT registered.");
   }
 
   /**

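A hedged sketch of how the RuntimeException above is reached: GenericWritable subclasses register the Writable classes they may wrap, and setting an instance of an unregistered class fails with the message shown. The subclass shape below follows the class's getTypes()/set() contract, which is an assumption here since the hunk shows only the failure path:

  import org.apache.hadoop.io.GenericWritable;
  import org.apache.hadoop.io.IntWritable;
  import org.apache.hadoop.io.Text;
  import org.apache.hadoop.io.Writable;

  class MyGeneric extends GenericWritable {
    @SuppressWarnings("unchecked")
    private static final Class<? extends Writable>[] TYPES =
      (Class<? extends Writable>[]) new Class[] {
        IntWritable.class, Text.class };

    protected Class<? extends Writable>[] getTypes() {
      return TYPES;                           // the registered types
    }
  }
  // new MyGeneric().set(new Text("ok"));     // registered: accepted
  // new MyGeneric().set(new MyGeneric());    // unregistered: RuntimeException
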
+ 17 - 17
src/java/org/apache/hadoop/io/MapFile.java

@@ -81,7 +81,7 @@ public class MapFile {
                   Class keyClass, Class valClass,
                   CompressionType compress, Progressable progress)
       throws IOException {
-      this(conf,fs,dirName,WritableComparator.get(keyClass),valClass,
+      this(conf, fs, dirName, WritableComparator.get(keyClass), valClass,
            compress, progress);
     }
 
@@ -89,7 +89,7 @@ public class MapFile {
     public Writer(Configuration conf, FileSystem fs, String dirName,
                   Class keyClass, Class valClass, CompressionType compress)
       throws IOException {
-      this(conf,fs,dirName,WritableComparator.get(keyClass),valClass,compress);
+      this(conf, fs, dirName, WritableComparator.get(keyClass), valClass, compress);
     }
 
     /** Create the named map using the named key comparator. */
@@ -101,8 +101,8 @@ public class MapFile {
     }
     /** Create the named map using the named key comparator. */
     public Writer(Configuration conf, FileSystem fs, String dirName,
-                 WritableComparator comparator, Class valClass,
-                 SequenceFile.CompressionType compress)
+                  WritableComparator comparator, Class valClass,
+                  SequenceFile.CompressionType compress)
       throws IOException {
       this(conf, fs, dirName, comparator, valClass, compress, null);
     }
@@ -118,7 +118,7 @@ public class MapFile {
 
       Path dir = new Path(dirName);
       if (!fs.mkdirs(dir)) {
-          throw new IOException("Mkdirs failed to create directory " + dir.toString());
+        throw new IOException("Mkdirs failed to create directory " + dir.toString());
       }
       Path dataFile = new Path(dir, DATA_FILE_NAME);
       Path indexFile = new Path(dir, INDEX_FILE_NAME);
@@ -126,7 +126,7 @@ public class MapFile {
       Class keyClass = comparator.getKeyClass();
       this.data =
         SequenceFile.createWriter
-        (fs,conf,dataFile,keyClass,valClass,compress,progress);
+        (fs, conf, dataFile, keyClass, valClass, compress, progress);
       this.index =
         SequenceFile.createWriter
         (fs, conf, indexFile, keyClass, LongWritable.class,
@@ -182,8 +182,8 @@ public class MapFile {
   public static class Reader {
       
     /** Number of index entries to skip between each entry.  Zero by default.
-    * Setting this to values larger than zero can facilitate opening large map
-    * files using less memory. */
+     * Setting this to values larger than zero can facilitate opening large map
+     * files using less memory. */
     private int INDEX_SKIP = 0;
       
     private WritableComparator comparator;
@@ -286,7 +286,7 @@ public class MapFile {
         }
       } catch (EOFException e) {
         SequenceFile.LOG.warn("Unexpected EOF reading " + index +
-                                 " at entry #" + count + ".  Ignoring.");
+                              " at entry #" + count + ".  Ignoring.");
       } finally {
 	indexClosed = true;
         index.close();
@@ -306,7 +306,7 @@ public class MapFile {
 
       readIndex();
       int pos = ((count - 1) / 2);              // middle of the index
-      if(pos < 0) {
+      if (pos < 0) {
         throw new IOException("MapFile empty");
       }
       
@@ -357,7 +357,7 @@ public class MapFile {
 
       if (seekIndex != -1                         // seeked before
           && seekIndex+1 < count           
-          && comparator.compare(key,keys[seekIndex+1])<0 // before next indexed
+          && comparator.compare(key, keys[seekIndex+1])<0 // before next indexed
           && comparator.compare(key, nextKey)
           >= 0) {                                 // but after last seeked
         // do nothing
@@ -431,9 +431,9 @@ public class MapFile {
      * @return          - returns the key that was the closest match or null if eof.
      */
     public synchronized WritableComparable getClosest(WritableComparable key, Writable val)
-        throws IOException {
+      throws IOException {
       
-      if(seekInternal(key) > 0) {
+      if (seekInternal(key) > 0) {
         return null;
       }
       data.getCurrentValue(val);
@@ -442,7 +442,7 @@ public class MapFile {
 
     /** Close the map. */
     public synchronized void close() throws IOException {
-      if (! indexClosed) {
+      if (!indexClosed) {
 	index.close();
       }
       data.close();
@@ -482,7 +482,7 @@ public class MapFile {
    * @throws Exception
    */
   public static long fix(FileSystem fs, Path dir,
-          Class keyClass, Class valueClass, boolean dryrun, Configuration conf) throws Exception {
+                         Class keyClass, Class valueClass, boolean dryrun, Configuration conf) throws Exception {
     String dr = (dryrun ? "[DRY RUN ] " : "");
     Path data = new Path(dir, DATA_FILE_NAME);
     Path index = new Path(dir, INDEX_FILE_NAME);
@@ -498,11 +498,11 @@ public class MapFile {
     SequenceFile.Reader dataReader = new SequenceFile.Reader(fs, data, conf);
     if (!dataReader.getKeyClass().equals(keyClass)) {
       throw new Exception(dr + "Wrong key class in " + dir + ", expected" + keyClass.getName() +
-              ", got " + dataReader.getKeyClass().getName());
+                          ", got " + dataReader.getKeyClass().getName());
     }
     if (!dataReader.getValueClass().equals(valueClass)) {
       throw new Exception(dr + "Wrong value class in " + dir + ", expected" + valueClass.getName() +
-              ", got " + dataReader.getValueClass().getName());
+                          ", got " + dataReader.getValueClass().getName());
     }
     long cnt = 0L;
     Writable key = (Writable)ReflectionUtils.newInstance(keyClass, conf);

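The Writer constructors re-aligned above all funnel into the (conf, fs, dirName, comparator, valClass, compress, progress) form. A hedged usage sketch of the keyClass-based variant shown in the hunk; the path and key/value types are illustrative, and MapFile requires keys to be appended in sorted order:

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.io.LongWritable;
  import org.apache.hadoop.io.MapFile;
  import org.apache.hadoop.io.SequenceFile;
  import org.apache.hadoop.io.Text;

  class MapFileExample {
    public static void main(String[] args) throws Exception {
      Configuration conf = new Configuration();
      FileSystem fs = FileSystem.get(conf);
      MapFile.Writer writer =
        new MapFile.Writer(conf, fs, "/tmp/example.map",
                           Text.class, LongWritable.class,
                           SequenceFile.CompressionType.NONE);
      writer.append(new Text("a"), new LongWritable(1)); // keys in order
      writer.append(new Text("b"), new LongWritable(2));
      writer.close();
    }
  }
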
+ 5 - 5
src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -147,8 +147,8 @@ public class ObjectWritable implements Writable, Configurable {
       } else {
         throw new IllegalArgumentException("Not a primitive: "+declaredClass);
       }
-    } else if (declaredClass.isEnum() ) {         // enum
-      UTF8.writeString( out, ((Enum)instance).name() );
+    } else if (declaredClass.isEnum()) {         // enum
+      UTF8.writeString(out, ((Enum)instance).name());
     } else if (Writable.class.isAssignableFrom(declaredClass)) { // Writable
       UTF8.writeString(out, instance.getClass().getName());
       ((Writable)instance).write(out);
@@ -169,7 +169,7 @@ public class ObjectWritable implements Writable, Configurable {
   /** Read a {@link Writable}, {@link String}, primitive type, or an array of
    * the preceding. */
   @SuppressWarnings("unchecked")
-    public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
+  public static Object readObject(DataInput in, ObjectWritable objectWritable, Configuration conf)
     throws IOException {
     String className = UTF8.readString(in);
     Class<?> declaredClass = PRIMITIVE_NAMES.get(className);
@@ -216,8 +216,8 @@ public class ObjectWritable implements Writable, Configurable {
       
     } else if (declaredClass == String.class) {        // String
       instance = UTF8.readString(in);
-    } else if( declaredClass.isEnum() ) {         // enum
-      instance = Enum.valueOf( (Class<? extends Enum>) declaredClass, UTF8.readString(in) );
+    } else if (declaredClass.isEnum()) {         // enum
+      instance = Enum.valueOf((Class<? extends Enum>) declaredClass, UTF8.readString(in));
     } else {                                      // Writable
       Class instanceClass = null;
       try {

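The enum branches above store just the constant's name (as UTF8) and rebuild it with Enum.valueOf on read. A small round-trip sketch; the enum type is illustrative, and the two-argument readObject(in, conf) companion to the three-argument form shown is assumed:

  import java.io.ByteArrayInputStream;
  import java.io.ByteArrayOutputStream;
  import java.io.DataInputStream;
  import java.io.DataOutputStream;
  import java.io.IOException;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.io.ObjectWritable;

  class EnumRoundTrip {
    enum Color { RED, GREEN }

    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      ByteArrayOutputStream bytes = new ByteArrayOutputStream();
      // Writes the declared class name, then the enum constant's name.
      ObjectWritable.writeObject(new DataOutputStream(bytes),
                                 Color.RED, Color.class, conf);
      Object back = ObjectWritable.readObject(
          new DataInputStream(new ByteArrayInputStream(bytes.toByteArray())),
          conf);
      System.out.println(back);               // RED
    }
  }
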
+ 4 - 4
src/java/org/apache/hadoop/io/SequenceFile.java

@@ -109,7 +109,7 @@ public class SequenceFile {
     createWriter(FileSystem fs, Configuration conf, Path name, 
                  Class keyClass, Class valClass) 
     throws IOException {
-    return createWriter(fs,conf,name,keyClass,valClass,
+    return createWriter(fs, conf, name, keyClass, valClass,
                         getCompressionType(conf));
   }
   
@@ -679,7 +679,7 @@ public class SequenceFile {
       out.writeBoolean(this.isCompressed());
       out.writeBoolean(this.isBlockCompressed());
       
-      if(this.isCompressed()) {
+      if (this.isCompressed()) {
         Text.writeString(out, (codec.getClass()).getName());
       }
       this.metadata.write(out);
@@ -698,7 +698,7 @@ public class SequenceFile {
       this.compress = compress;
       this.codec = codec;
       this.metadata = metadata;
-      if(this.codec != null) {
+      if (this.codec != null) {
         ReflectionUtils.setConf(this.codec, this.conf);
         this.deflateFilter = this.codec.createOutputStream(buffer);
         this.deflateOut = 
@@ -2351,7 +2351,7 @@ public class SequenceFile {
         rawKey.reset();
         rawKey.write(ms.getKey().getData(), 0, ms.getKey().getLength());
         //load the raw value. Re-use the existing rawValue buffer
-        if(rawValue == null)
+        if (rawValue == null)
           rawValue = ms.in.createValueBytes();
         int valLength = ms.nextRawValue(rawValue);
 

+ 2 - 2
src/java/org/apache/hadoop/io/Text.java

@@ -202,7 +202,7 @@ public class Text implements WritableComparable {
    * increased to match. The existing contents of the buffer
    * (if any) are deleted.
    */
-  private void setCapacity( int len ) {
+  private void setCapacity(int len) {
     if (bytes == null || bytes.length < len)
       bytes = new byte[len];      
   }
@@ -246,7 +246,7 @@ public class Text implements WritableComparable {
   /** Compare two Texts bytewise using standard UTF8 ordering. */
   public int compareTo(Object o) {
     Text that = (Text)o;
-    if(this == that)
+    if (this == that)
       return 0;
     else
       return WritableComparator.compareBytes(bytes, 0, length,

+ 1 - 1
src/java/org/apache/hadoop/io/VersionedWritable.java

@@ -45,7 +45,7 @@ public abstract class VersionedWritable implements Writable {
   public void readFields(DataInput in) throws IOException {
     byte version = in.readByte();                 // read version
     if (version != getVersion())
-      throw new VersionMismatchException(getVersion(),version);
+      throw new VersionMismatchException(getVersion(), version);
   }
 
     

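Since readFields() above rejects any record whose leading version byte differs from getVersion(), subclasses write the version first and validate it first. A minimal sketch of a subclass (the class and field names are hypothetical):

  import java.io.DataInput;
  import java.io.DataOutput;
  import java.io.IOException;
  import org.apache.hadoop.io.VersionedWritable;

  class ExampleRecord extends VersionedWritable {
    private static final byte VERSION = 1;
    private int value;

    public byte getVersion() { return VERSION; }

    public void write(DataOutput out) throws IOException {
      super.write(out);                 // emits the version byte first
      out.writeInt(value);
    }

    public void readFields(DataInput in) throws IOException {
      super.readFields(in);             // VersionMismatchException if stale
      value = in.readInt();
    }
  }
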
+ 16 - 16
src/java/org/apache/hadoop/io/WritableComparator.java

@@ -175,21 +175,21 @@ public class WritableComparator implements Comparator {
    * @return deserialized long
    */
   public static long readVLong(byte[] bytes, int start) throws IOException {
-      int len = bytes[start];
-      if (len >= -112) {
-          return len;
-      }
-      boolean isNegative = (len < -120);
-      len = isNegative ? -(len + 120) : -(len + 112);
-      if (start+1+len>bytes.length)
-          throw new IOException(
-                  "Not enough number of bytes for a zero-compressed integer");
-      long i = 0;
-      for (int idx = 0; idx < len; idx++) {
-          i = i << 8;
-          i = i | (bytes[start+1+idx] & 0xFF);
-      }
-      return (isNegative ? (i ^ -1L) : i);
+    int len = bytes[start];
+    if (len >= -112) {
+      return len;
+    }
+    boolean isNegative = (len < -120);
+    len = isNegative ? -(len + 120) : -(len + 112);
+    if (start+1+len>bytes.length)
+      throw new IOException(
+                            "Not enough number of bytes for a zero-compressed integer");
+    long i = 0;
+    for (int idx = 0; idx < len; idx++) {
+      i = i << 8;
+      i = i | (bytes[start+1+idx] & 0xFF);
+    }
+    return (isNegative ? (i ^ -1L) : i);
   }
   
   /**
@@ -200,6 +200,6 @@ public class WritableComparator implements Comparator {
    * @return deserialized integer
    */
   public static int readVInt(byte[] bytes, int start) throws IOException {
-      return (int) readVLong(bytes, start);
+    return (int) readVLong(bytes, start);
   }
 }

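The zero-compressed scheme decoded above: a first byte in [-112, 127] is the value itself; otherwise it encodes the sign (a byte below -120 marks a negative value) and how many big-endian bytes follow. A worked decode using only readVLong as shown (the byte values are illustrative):

  import java.io.IOException;
  import org.apache.hadoop.io.WritableComparator;

  class VLongDemo {
    public static void main(String[] args) throws IOException {
      // First byte -114: less than -112 but not below -120, so the value
      // is positive and spans -(-114 + 112) = 2 trailing bytes.
      // Big-endian, 0x01 0x2C = 300.
      long v = WritableComparator.readVLong(new byte[] {-114, 0x01, 0x2C}, 0);
      System.out.println(v);            // 300
    }
  }
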
+ 6 - 6
src/java/org/apache/hadoop/io/WritableUtils.java

@@ -37,8 +37,8 @@ public final class WritableUtils  {
     byte[] outbuf = new byte[length];
     ByteArrayOutputStream bos =  new ByteArrayOutputStream();
     int len;
-    while((len=gzi.read(outbuf,0,outbuf.length)) != -1){
-      bos.write(outbuf,0,len);
+    while((len=gzi.read(outbuf, 0, outbuf.length)) != -1){
+      bos.write(outbuf, 0, len);
     }
     byte[] decompressed =  bos.toByteArray();
     bos.close();
@@ -55,12 +55,12 @@ public final class WritableUtils  {
     if (bytes != null) {
       ByteArrayOutputStream bos =  new ByteArrayOutputStream();
       GZIPOutputStream gzout = new GZIPOutputStream(bos);
-      gzout.write(bytes,0,bytes.length);
+      gzout.write(bytes, 0, bytes.length);
       gzout.close();
       byte[] buffer = bos.toByteArray();
       int len = buffer.length;
       out.writeInt(len);
-      out.write(buffer,0,len);
+      out.write(buffer, 0, len);
       /* debug only! Once we have confidence, can lose this. */
       return ((bytes.length != 0) ? (100*buffer.length)/bytes.length : 0);
     } else {
@@ -94,7 +94,7 @@ public final class WritableUtils  {
       byte[] buffer = s.getBytes("UTF-8");
       int len = buffer.length;
       out.writeInt(len);
-      out.write(buffer,0,len);
+      out.write(buffer, 0, len);
     } else {
       out.writeInt(-1);
     }
@@ -183,7 +183,7 @@ public final class WritableUtils  {
    */
   public static void displayByteArray(byte[] record){
     int i;
-    for(i=0;i < record.length -1 ; i++){
+    for(i=0;i < record.length -1; i++){
       if (i % 16 == 0) { System.out.println(); }
       System.out.print(Integer.toHexString(record[i]  >> 4 & 0x0F));
       System.out.print(Integer.toHexString(record[i] & 0x0F));

+ 2 - 2
src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java

@@ -44,7 +44,7 @@ class BlockCompressorStream extends CompressorStream {
    *                            algorithm with given bufferSize
    */
   public BlockCompressorStream(OutputStream out, Compressor compressor, 
-      int bufferSize, int compressionOverhead) {
+                               int bufferSize, int compressionOverhead) {
     super(out, compressor, bufferSize);
     MAX_INPUT_SIZE = bufferSize - compressionOverhead;
   }
@@ -70,7 +70,7 @@ class BlockCompressorStream extends CompressorStream {
     if (b == null) {
       throw new NullPointerException();
     } else if ((off < 0) || (off > b.length) || (len < 0) ||
-            ((off + len) > b.length)) {
+               ((off + len) > b.length)) {
       throw new IndexOutOfBoundsException();
     } else if (len == 0) {
       return;

+ 2 - 2
src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java

@@ -41,7 +41,7 @@ class BlockDecompressorStream extends DecompressorStream {
    * @param bufferSize size of buffer
    */
   public BlockDecompressorStream(InputStream in, Decompressor decompressor, 
-      int bufferSize) {
+                                 int bufferSize) {
     super(in, decompressor, bufferSize);
   }
   
@@ -123,7 +123,7 @@ class BlockDecompressorStream extends DecompressorStream {
     int b3 = in.read();
     int b4 = in.read();
     if ((b1 | b2 | b3 | b4) < 0)
-        throw new EOFException();
+      throw new EOFException();
     return ((b1 << 24) + (b2 << 16) + (b3 << 8) + (b4 << 0));
   }
 }

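The four-byte read above reassembles a big-endian int: for input bytes 0x00, 0x01, 0x02, 0x03 the result is (0 << 24) + (1 << 16) + (2 << 8) + 3 = 66051. The (b1 | b2 | b3 | b4) < 0 guard works because in.read() returns -1 at end of stream, and OR-ing a -1 into the group makes the combined value negative.
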
+ 1 - 1
src/java/org/apache/hadoop/io/compress/CompressionCodecFactory.java

@@ -122,7 +122,7 @@ public class CompressionCodecFactory {
         buf.append(itr.next().getName());
       }
     }
-    conf.set("io.compression.codecs",buf.toString());   
+    conf.set("io.compression.codecs", buf.toString());   
   }
   
   /**

+ 19 - 19
src/java/org/apache/hadoop/io/compress/LzoCodec.java

@@ -43,11 +43,11 @@ public class LzoCodec implements Configurable, CompressionCodec {
   private Configuration conf;
   
   public void setConf(Configuration conf) {
-	  this.conf = conf;
+    this.conf = conf;
   }
   
   public Configuration getConf() {
-	  return conf;
+    return conf;
   }
 
   private static boolean nativeLzoLoaded = false;
@@ -55,7 +55,7 @@ public class LzoCodec implements Configurable, CompressionCodec {
   static {
     if (NativeCodeLoader.isNativeCodeLoaded()) {
       nativeLzoLoaded = LzoCompressor.isNativeLzoLoaded() &&
-                          LzoDecompressor.isNativeLzoLoaded();
+        LzoDecompressor.isNativeLzoLoaded();
       
       if (nativeLzoLoaded) {
         LOG.info("Successfully loaded & initialized native-lzo library");
@@ -78,7 +78,7 @@ public class LzoCodec implements Configurable, CompressionCodec {
   }
   
   public CompressionOutputStream createOutputStream(OutputStream out) 
-  throws IOException {
+    throws IOException {
     // Ensure native-lzo library is loaded & initialized
     if (!isNativeLzoLoaded()) {
       throw new IOException("native-lzo library not available");
@@ -107,12 +107,12 @@ public class LzoCodec implements Configurable, CompressionCodec {
     // Create the lzo output-stream
     LzoCompressor.CompressionStrategy strategy = 
       LzoCompressor.CompressionStrategy.valueOf(
-              conf.get("io.compression.codec.lzo.compressor",
-                        LzoCompressor.CompressionStrategy.LZO1X_1.name()
-                      )
-                    ); 
+                                                conf.get("io.compression.codec.lzo.compressor",
+                                                         LzoCompressor.CompressionStrategy.LZO1X_1.name()
+                                                         )
+                                                ); 
     int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 
-                                  64*1024);
+                                 64*1024);
     int compressionOverhead = 0;
     if (strategy.name().contains("LZO1")) {
       compressionOverhead = (int)(((bufferSize - (64 + 3)) * 16.0) / 17.0);  
@@ -121,12 +121,12 @@ public class LzoCodec implements Configurable, CompressionCodec {
     }
      
     return new BlockCompressorStream(out, 
-            new LzoCompressor(strategy, bufferSize), 
-            bufferSize, compressionOverhead);
+                                     new LzoCompressor(strategy, bufferSize), 
+                                     bufferSize, compressionOverhead);
   }
   
   public CompressionInputStream createInputStream(InputStream in) 
-  throws IOException {
+    throws IOException {
     // Ensure native-lzo library is loaded & initialized
     if (!isNativeLzoLoaded()) {
       throw new IOException("native-lzo library not available");
@@ -135,16 +135,16 @@ public class LzoCodec implements Configurable, CompressionCodec {
     // Create the lzo input-stream
     LzoDecompressor.CompressionStrategy strategy = 
       LzoDecompressor.CompressionStrategy.valueOf(
-              conf.get("io.compression.codec.lzo.decompressor",
-                        LzoDecompressor.CompressionStrategy.LZO1X.name()
-                      )
-                    ); 
+                                                  conf.get("io.compression.codec.lzo.decompressor",
+                                                           LzoDecompressor.CompressionStrategy.LZO1X.name()
+                                                           )
+                                                  ); 
     int bufferSize = conf.getInt("io.compression.codec.lzo.buffersize", 
-                                  64*1024);
+                                 64*1024);
 
     return new BlockDecompressorStream(in, 
-            new LzoDecompressor(strategy, bufferSize), 
-            bufferSize);
+                                       new LzoDecompressor(strategy, bufferSize), 
+                                       bufferSize);
   }
   
   /**

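For reference, the configuration keys read in the re-indented blocks above, with the defaults this hunk falls back to (a sketch; conf is an org.apache.hadoop.conf.Configuration):

  conf.set("io.compression.codec.lzo.compressor", "LZO1X_1");    // default
  conf.set("io.compression.codec.lzo.decompressor", "LZO1X");    // default
  conf.setInt("io.compression.codec.lzo.buffersize", 64 * 1024); // default
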
+ 3 - 3
src/java/org/apache/hadoop/io/compress/lzo/LzoCompressor.java

@@ -153,7 +153,7 @@ public class LzoCompressor implements Compressor {
       nativeLzoLoaded = true;
     } else {
       LOG.error("Cannot load " + LzoCompressor.class.getName() + 
-              " without native-hadoop library!");
+                " without native-hadoop library!");
     }
   }
   
@@ -220,7 +220,7 @@ public class LzoCompressor implements Compressor {
     // Reinitialize lzo's input direct buffer
     uncompressedDirectBuf.rewind();
     ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff,  
-                                          uncompressedDirectBufLen);
+                                            uncompressedDirectBufLen);
 
     // Note how much data is being fed to lzo
     userBufOff += uncompressedDirectBufLen;
@@ -261,7 +261,7 @@ public class LzoCompressor implements Compressor {
   }
 
   public synchronized int compress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
     if (b == null) {
       throw new NullPointerException();
     }

+ 4 - 4
src/java/org/apache/hadoop/io/compress/lzo/LzoDecompressor.java

@@ -133,7 +133,7 @@ public class LzoDecompressor implements Decompressor {
       nativeLzoLoaded = true;
     } else {
       LOG.error("Cannot load " + LzoDecompressor.class.getName() + 
-              " without native-hadoop library!");
+                " without native-hadoop library!");
     }
   }
   
@@ -202,7 +202,7 @@ public class LzoDecompressor implements Decompressor {
     // Reinitialize lzo's input direct-buffer
     compressedDirectBuf.rewind();
     ((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff, 
-                                        compressedDirectBufLen);
+                                          compressedDirectBufLen);
     
     // Note how much data is being fed to lzo
     userBufOff += compressedDirectBufLen;
@@ -243,7 +243,7 @@ public class LzoDecompressor implements Decompressor {
   }
 
   public synchronized int decompress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
     if (b == null) {
       throw new NullPointerException();
     }
@@ -255,7 +255,7 @@ public class LzoDecompressor implements Decompressor {
     
     // Check if there is uncompressed data
     n = uncompressedDirectBuf.remaining();
-    if(n > 0) {
+    if (n > 0) {
       n = Math.min(n, len);
       ((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
       return n;

+ 7 - 7
src/java/org/apache/hadoop/io/compress/zlib/ZlibCompressor.java

@@ -188,7 +188,7 @@ public class ZlibCompressor implements Compressor {
    * @param directBufferSize Size of the direct buffer to be used.
    */
   public ZlibCompressor(CompressionLevel level, CompressionStrategy strategy, 
-      CompressionHeader header, int directBufferSize) {
+                        CompressionHeader header, int directBufferSize) {
     this.level = level;
     this.strategy = strategy;
     this.windowBits = header;
@@ -209,9 +209,9 @@ public class ZlibCompressor implements Compressor {
    */
   public ZlibCompressor() {
     this(CompressionLevel.DEFAULT_COMPRESSION, 
-        CompressionStrategy.DEFAULT_STRATEGY, 
-        CompressionHeader.DEFAULT_HEADER, 
-        DEFAULT_DIRECT_BUFFER_SIZE);
+         CompressionStrategy.DEFAULT_STRATEGY, 
+         CompressionHeader.DEFAULT_HEADER, 
+         DEFAULT_DIRECT_BUFFER_SIZE);
   }
   
   public synchronized void setInput(byte[] b, int off, int len) {
@@ -242,7 +242,7 @@ public class ZlibCompressor implements Compressor {
     // Reinitialize zlib's input direct buffer
     uncompressedDirectBuf.rewind();
     ((ByteBuffer)uncompressedDirectBuf).put(userBuf, userBufOff,  
-                                          uncompressedDirectBufLen);
+                                            uncompressedDirectBufLen);
 
     // Note how much data is being fed to zlib
     userBufOff += uncompressedDirectBufLen;
@@ -289,7 +289,7 @@ public class ZlibCompressor implements Compressor {
   }
 
   public synchronized int compress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
     if (b == null) {
       throw new NullPointerException();
     }
@@ -369,7 +369,7 @@ public class ZlibCompressor implements Compressor {
   private native static void initIDs();
   private native static long init(int level, int strategy, int windowBits);
   private native static void setDictionary(long strm, byte[] b, int off,
-       int len);
+                                           int len);
   private native int deflateBytesDirect();
   private native static long getBytesRead(long strm);
   private native static long getBytesWritten(long strm);

+ 4 - 4
src/java/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.java

@@ -147,7 +147,7 @@ public class ZlibDecompressor implements Decompressor {
     // Reinitialize zlib's input direct buffer
     compressedDirectBuf.rewind();
     ((ByteBuffer)compressedDirectBuf).put(userBuf, userBufOff, 
-                                        compressedDirectBufLen);
+                                          compressedDirectBufLen);
     
     // Note how much data is being fed to zlib
     userBufOff += compressedDirectBufLen;
@@ -195,7 +195,7 @@ public class ZlibDecompressor implements Decompressor {
   }
 
   public synchronized int decompress(byte[] b, int off, int len) 
-  throws IOException {
+    throws IOException {
     if (b == null) {
       throw new NullPointerException();
     }
@@ -207,7 +207,7 @@ public class ZlibDecompressor implements Decompressor {
     
     // Check if there is uncompressed data
     n = uncompressedDirectBuf.remaining();
-    if(n > 0) {
+    if (n > 0) {
       n = Math.min(n, len);
       ((ByteBuffer)uncompressedDirectBuf).get(b, off, n);
       return n;
@@ -278,7 +278,7 @@ public class ZlibDecompressor implements Decompressor {
   private native static void initIDs();
   private native static long init(int windowBits);
   private native static void setDictionary(long strm, byte[] b, int off,
-       int len);
+                                           int len);
   private native int inflateBytesDirect();
   private native static long getBytesRead(long strm);
   private native static long getBytesWritten(long strm);

+ 2 - 2
src/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -88,7 +88,7 @@ public class RetryPolicies {
    * </p>
    */
   public static final RetryPolicy retryByException(RetryPolicy defaultPolicy,
-        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
+                                                   Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
     return new ExceptionDependentRetry(defaultPolicy, exceptionToPolicyMap);
   }
   
@@ -169,7 +169,7 @@ public class RetryPolicies {
     Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap;
     
     public ExceptionDependentRetry(RetryPolicy defaultPolicy,
-        Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
+                                   Map<Class<? extends Exception>, RetryPolicy> exceptionToPolicyMap) {
       this.defaultPolicy = defaultPolicy;
       this.exceptionToPolicyMap = exceptionToPolicyMap;
     }

+ 13 - 13
src/java/org/apache/hadoop/ipc/Client.java

@@ -65,7 +65,7 @@ public class Client {
     new Hashtable<InetSocketAddress, Connection>();
 
   private Class valueClass;                       // class of call values
-  private int timeout ;// timeout for calls
+  private int timeout;// timeout for calls
   private int counter;                            // counter for call ids
   private boolean running = true;                 // true while client runs
   private Configuration conf;
@@ -94,7 +94,7 @@ public class Client {
     /** Called by the connection thread when the call is complete and the
      * value or error string are available.  Notifies by default.  */
     public synchronized void callComplete() {
-        notify();                                 // notify caller
+      notify();                                 // notify caller
     }
 
     /** Update lastActivity with the current time. */
@@ -132,7 +132,7 @@ public class Client {
 
     public Connection(InetSocketAddress address) throws IOException {
       if (address.isUnresolved()) {
-         throw new UnknownHostException("unknown host: " + address.getHostName());
+        throw new UnknownHostException("unknown host: " + address.getHostName());
       }
       this.address = address;
       this.setName("IPC Client connection to " + address.toString());
@@ -183,7 +183,7 @@ public class Client {
                }
                return value;
              }
-          }));
+           }));
       this.out = new DataOutputStream
         (new BufferedOutputStream
          (new FilterOutputStream(socket.getOutputStream()) {
@@ -282,7 +282,7 @@ public class Client {
           decrementRef();
         }
       } catch (EOFException eof) {
-          // This is what happens when the remote side goes down
+        // This is what happens when the remote side goes down
       } catch (Exception e) {
         LOG.info(StringUtils.stringifyException(e));
       } finally {
@@ -408,11 +408,11 @@ public class Client {
           while (i.hasNext()) {
             Connection c = (Connection)i.next();
             if (c.isIdle()) { 
-            //We don't actually close the socket here (i.e., don't invoke
-            //the close() method). We leave that work to the response receiver
-            //thread. The reason for that is since we have taken a lock on the
-            //connections table object, we don't want to slow down the entire
-            //system if we happen to talk to a slow server.
+              //We don't actually close the socket here (i.e., don't invoke
+              //the close() method). We leave that work to the response receiver
+              //thread. The reason for that is since we have taken a lock on the
+              //connections table object, we don't want to slow down the entire
+              //system if we happen to talk to a slow server.
               i.remove();
               synchronized (c) {
                 c.setCloseConnection();
@@ -429,8 +429,8 @@ public class Client {
    * class. */
   public Client(Class valueClass, Configuration conf) {
     this.valueClass = valueClass;
-    this.timeout = conf.getInt("ipc.client.timeout",10000);
-    this.maxIdleTime = conf.getInt("ipc.client.connection.maxidletime",1000);
+    this.timeout = conf.getInt("ipc.client.timeout", 10000);
+    this.maxIdleTime = conf.getInt("ipc.client.connection.maxidletime", 1000);
     this.maxRetries = conf.getInt("ipc.client.connect.max.retries", 10);
     this.conf = conf;
 
@@ -438,7 +438,7 @@ public class Client {
     t.setDaemon(true);
     t.setName(valueClass.getName() + " Connection Culler");
     LOG.debug(valueClass.getName() + 
-             "Connection culler maxidletime= " + maxIdleTime + "ms");
+              "Connection culler maxidletime= " + maxIdleTime + "ms");
     t.start();
   }
  

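For reference, the client-side knobs read in the constructor hunk above, with the defaults shown there (the first two are milliseconds, the third a count):

  conf.setInt("ipc.client.timeout", 10000);                // call timeout
  conf.setInt("ipc.client.connection.maxidletime", 1000);  // idle-cull time
  conf.setInt("ipc.client.connect.max.retries", 10);       // connect retries
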
+ 11 - 11
src/java/org/apache/hadoop/ipc/RPC.java

@@ -144,7 +144,7 @@ public class RPC {
    * Stop all RPC client connections
    */
   public static synchronized void stopClient(){
-    if(CLIENT != null)
+    if (CLIENT != null)
       CLIENT.stop();
   }
 
@@ -224,9 +224,9 @@ public class RPC {
     while (true) {
       try {
         return getProxy(protocol, clientVersion, addr, conf);
-      } catch( ConnectException se ) {  // namenode has not been started
+      } catch(ConnectException se) {  // namenode has not been started
         LOG.info("Server at " + addr + " not available yet, Zzzzz...");
-      } catch( SocketTimeoutException te ) {  // namenode is busy
+      } catch(SocketTimeoutException te) {  // namenode is busy
         LOG.info("Problem connecting to server: " + addr);
       }
       try {
@@ -241,9 +241,9 @@ public class RPC {
   public static VersionedProtocol getProxy(Class protocol, long clientVersion,
                                            InetSocketAddress addr, Configuration conf) throws IOException {
     VersionedProtocol proxy = (VersionedProtocol) Proxy.newProxyInstance(
-                                  protocol.getClassLoader(),
-                                  new Class[] { protocol },
-                                  new Invoker(addr, conf));
+                                                                         protocol.getClassLoader(),
+                                                                         new Class[] { protocol },
+                                                                         new Invoker(addr, conf));
     long serverVersion = proxy.getProtocolVersion(protocol.getName(), 
                                                   clientVersion);
     if (serverVersion == clientVersion) {
@@ -269,7 +269,7 @@ public class RPC {
     }
 
     Object[] values =
-      (Object[])Array.newInstance(method.getReturnType(),wrappedValues.length);
+      (Object[])Array.newInstance(method.getReturnType(), wrappedValues.length);
     for (int i = 0; i < values.length; i++)
       if (wrappedValues[i] != null)
         values[i] = ((ObjectWritable)wrappedValues[i]).get();
@@ -280,7 +280,7 @@ public class RPC {
   /** Construct a server for a protocol implementation instance listening on a
    * port and address. */
   public static Server getServer(final Object instance, final String bindAddress, final int port, Configuration conf) 
-  throws IOException {
+    throws IOException {
     return getServer(instance, bindAddress, port, 1, false, conf);
   }
 
@@ -289,8 +289,8 @@ public class RPC {
   public static Server getServer(final Object instance, final String bindAddress, final int port,
                                  final int numHandlers,
                                  final boolean verbose, Configuration conf) 
-  throws IOException {
-    return new Server(instance, conf, bindAddress,port, numHandlers, verbose);
+    throws IOException {
+    return new Server(instance, conf, bindAddress, port, numHandlers, verbose);
   }
 
   /** An RPC Server. */
@@ -306,7 +306,7 @@ public class RPC {
      * @param port the port to listen for connections on
      */
     public Server(Object instance, Configuration conf, String bindAddress, int port) 
-    throws IOException {
+      throws IOException {
       this(instance, conf,  bindAddress, port, 1, false);
     }
 

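The signatures re-wrapped above pair up end to end: getServer(instance, bindAddress, port, conf) exports an object, and getProxy(protocol, clientVersion, addr, conf) connects and checks that the server's getProtocolVersion matches the client's. A hedged sketch (the interface, port, and version are illustrative, and Server.start() is assumed from the ipc.Server API):

  import java.io.IOException;
  import java.net.InetSocketAddress;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.ipc.RPC;
  import org.apache.hadoop.ipc.VersionedProtocol;

  interface MyProtocol extends VersionedProtocol {
    long versionID = 1L;                // hypothetical protocol version
    String echo(String s);
  }

  class MyProtocolImpl implements MyProtocol {
    public String echo(String s) { return s; }
    public long getProtocolVersion(String protocol, long clientVersion) {
      return versionID;
    }
  }

  class RpcExample {
    public static void main(String[] args) throws IOException {
      Configuration conf = new Configuration();
      RPC.Server server =
        RPC.getServer(new MyProtocolImpl(), "0.0.0.0", 9000, conf);
      server.start();                   // serve MyProtocol on port 9000
      MyProtocol proxy = (MyProtocol) RPC.getProxy(
          MyProtocol.class, MyProtocol.versionID,
          new InetSocketAddress("localhost", 9000), conf);
      System.out.println(proxy.echo("hello"));
    }
  }
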
Some files were not shown because too many files changed in this diff