HADOOP-1147. Remove @author tags from Java source files.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@549284 13f79535-47bb-0310-9956-ffa450edef68
Author: Doug Cutting
Commit: 2554cad056
100 changed files with 2 additions and 133 deletions
  1. + 2 - 0  CHANGES.txt
  2. + 0 - 1  src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordCountAggregatorDescriptor.java
  3. + 0 - 1  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorBaseDescriptor.java
  4. + 0 - 1  src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java
  5. + 0 - 1  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java
  6. + 0 - 2  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java
  7. + 0 - 1  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java
  8. + 0 - 2  src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java
  9. + 0 - 1  src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java
  10. + 0 - 1  src/contrib/hbase/src/java/org/onelab/filter/BloomFilter.java
  11. + 0 - 1  src/contrib/hbase/src/java/org/onelab/filter/CountingBloomFilter.java
  12. + 0 - 1  src/contrib/hbase/src/java/org/onelab/filter/DynamicBloomFilter.java
  13. + 0 - 1  src/contrib/hbase/src/java/org/onelab/filter/RemoveScheme.java
  14. + 0 - 1  src/contrib/hbase/src/java/org/onelab/filter/RetouchedBloomFilter.java
  15. + 0 - 1  src/contrib/hbase/src/test/org/onelab/test/StringKey.java
  16. + 0 - 1  src/contrib/hbase/src/test/org/onelab/test/TestFilter.java
  17. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java
  18. + 0 - 2  src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java
  19. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java
  20. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java
  21. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java
  22. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java
  23. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java
  24. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java
  25. + 0 - 2  src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java
  26. + 0 - 1  src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java
  27. + 0 - 1  src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java
  28. + 0 - 2  src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java
  29. + 0 - 1  src/examples/org/apache/hadoop/examples/ExampleDriver.java
  30. + 0 - 2  src/examples/org/apache/hadoop/examples/PiEstimator.java
  31. + 0 - 2  src/examples/org/apache/hadoop/examples/Sort.java
  32. + 0 - 2  src/examples/org/apache/hadoop/examples/WordCount.java
  33. + 0 - 1  src/java/org/apache/hadoop/HadoopVersionAnnotation.java
  34. + 0 - 1  src/java/org/apache/hadoop/dfs/AlreadyBeingCreatedException.java
  35. + 0 - 1  src/java/org/apache/hadoop/dfs/Block.java
  36. + 0 - 1  src/java/org/apache/hadoop/dfs/BlockCommand.java
  37. + 0 - 3  src/java/org/apache/hadoop/dfs/ClientProtocol.java
  38. + 0 - 2  src/java/org/apache/hadoop/dfs/DFSAdmin.java
  39. + 0 - 3  src/java/org/apache/hadoop/dfs/DFSClient.java
  40. + 0 - 1  src/java/org/apache/hadoop/dfs/DFSFileInfo.java
  41. + 0 - 1  src/java/org/apache/hadoop/dfs/DFSck.java
  42. + 0 - 1  src/java/org/apache/hadoop/dfs/DataNode.java
  43. + 0 - 1  src/java/org/apache/hadoop/dfs/DataStorage.java
  44. + 0 - 2  src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java
  45. + 0 - 1  src/java/org/apache/hadoop/dfs/DatanodeID.java
  46. + 0 - 3  src/java/org/apache/hadoop/dfs/DatanodeInfo.java
  47. + 0 - 1  src/java/org/apache/hadoop/dfs/DatanodeProtocol.java
  48. + 0 - 1  src/java/org/apache/hadoop/dfs/DatanodeRegistration.java
  49. + 0 - 1  src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java
  50. + 0 - 1  src/java/org/apache/hadoop/dfs/DistributedFileSystem.java
  51. + 0 - 1  src/java/org/apache/hadoop/dfs/FSConstants.java
  52. + 0 - 3  src/java/org/apache/hadoop/dfs/FSDataset.java
  53. + 0 - 2  src/java/org/apache/hadoop/dfs/FSDirectory.java
  54. + 0 - 1  src/java/org/apache/hadoop/dfs/FSEditLog.java
  55. + 0 - 1  src/java/org/apache/hadoop/dfs/FSImage.java
  56. + 0 - 6  src/java/org/apache/hadoop/dfs/FSNamesystem.java
  57. + 0 - 1  src/java/org/apache/hadoop/dfs/FileUnderConstruction.java
  58. + 0 - 1  src/java/org/apache/hadoop/dfs/FsckServlet.java
  59. + 0 - 1  src/java/org/apache/hadoop/dfs/GetImageServlet.java
  60. + 0 - 1  src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java
  61. + 0 - 1  src/java/org/apache/hadoop/dfs/IncorrectVersionException.java
  62. + 0 - 1  src/java/org/apache/hadoop/dfs/LeaseExpiredException.java
  63. + 0 - 1  src/java/org/apache/hadoop/dfs/LocatedBlock.java
  64. + 0 - 1  src/java/org/apache/hadoop/dfs/NameNode.java
  65. + 0 - 3  src/java/org/apache/hadoop/dfs/NamenodeFsck.java
  66. + 0 - 1  src/java/org/apache/hadoop/dfs/NamespaceInfo.java
  67. + 0 - 1  src/java/org/apache/hadoop/dfs/NotReplicatedYetException.java
  68. + 0 - 1  src/java/org/apache/hadoop/dfs/PendingCreates.java
  69. + 0 - 1  src/java/org/apache/hadoop/dfs/PendingReplicationBlocks.java
  70. + 0 - 2  src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java
  71. + 0 - 1  src/java/org/apache/hadoop/dfs/SafeModeException.java
  72. + 0 - 2  src/java/org/apache/hadoop/dfs/SecondaryNameNode.java
  73. + 0 - 2  src/java/org/apache/hadoop/dfs/Storage.java
  74. + 0 - 1  src/java/org/apache/hadoop/dfs/TransferFsImage.java
  75. + 0 - 1  src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java
  76. + 0 - 1  src/java/org/apache/hadoop/filecache/DistributedCache.java
  77. + 0 - 1  src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  78. + 0 - 1  src/java/org/apache/hadoop/fs/FSInputStream.java
  79. + 0 - 1  src/java/org/apache/hadoop/fs/FileSystem.java
  80. + 0 - 1  src/java/org/apache/hadoop/fs/FileUtil.java
  81. + 0 - 1  src/java/org/apache/hadoop/fs/FilterFileSystem.java
  82. + 0 - 2  src/java/org/apache/hadoop/fs/InMemoryFileSystem.java
  83. + 0 - 1  src/java/org/apache/hadoop/fs/LocalFileSystem.java
  84. + 0 - 1  src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  85. + 0 - 1  src/java/org/apache/hadoop/fs/s3/S3FileSystem.java
  86. + 0 - 1  src/java/org/apache/hadoop/io/BytesWritable.java
  87. + 0 - 1  src/java/org/apache/hadoop/io/DataInputBuffer.java
  88. + 0 - 1  src/java/org/apache/hadoop/io/DataOutputBuffer.java
  89. + 0 - 1  src/java/org/apache/hadoop/io/GenericWritable.java
  90. + 0 - 2  src/java/org/apache/hadoop/io/MD5Hash.java
  91. + 0 - 1  src/java/org/apache/hadoop/io/UTF8.java
  92. + 0 - 1  src/java/org/apache/hadoop/io/VIntWritable.java
  93. + 0 - 1  src/java/org/apache/hadoop/io/VLongWritable.java
  94. + 0 - 2  src/java/org/apache/hadoop/io/VersionedWritable.java
  95. + 0 - 2  src/java/org/apache/hadoop/io/Writable.java
  96. + 0 - 2  src/java/org/apache/hadoop/io/WritableComparable.java
  97. + 0 - 1  src/java/org/apache/hadoop/io/WritableName.java
  98. + 0 - 1  src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java
  99. + 0 - 1  src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java
  100. + 0 - 1  src/java/org/apache/hadoop/io/compress/CompressionCodec.java

+ 2 - 0
CHANGES.txt

@@ -226,6 +226,8 @@ Trunk (unreleased changes)
  68. HADOOP-1501.  Better randomize sending of block reports to
      namenode, so reduce load spikes.  (Dhruba Borthakur via cutting)
 
+ 69. HADOOP-1147.  Remove @author tags from Java source files.
+
 
 Release 0.13.0 - 2007-06-08
 

+ 0 - 1
src/contrib/abacus/src/examples/org/apache/hadoop/abacus/examples/WordCountAggregatorDescriptor.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.mapred.JobConf;
 /**
  * This class implements a user defined aggregator descriptor that is used
  * for counting the words in the input data
- *
  */
 public class WordCountAggregatorDescriptor extends
     ValueAggregatorBaseDescriptor {

+ 0 - 1
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorBaseDescriptor.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.mapred.JobConf;
  * 
  * This class implements the common functionalities of 
  * the subclasses of ValueAggregatorDescriptor class.
- *
  */
 public class ValueAggregatorBaseDescriptor implements ValueAggregatorDescriptor {
 

+ 0 - 1
src/contrib/abacus/src/java/org/apache/hadoop/abacus/ValueAggregatorJobBase.java

@@ -29,7 +29,6 @@ import org.apache.hadoop.mapred.JobConf;
  * 
 * This abstract class implements some common functionality of the
 * generic mapper, reducer and combiner classes of Abacus.
- *
  */
 public abstract class ValueAggregatorJobBase extends JobBase {
  

+ 0 - 1
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ArrayListBackedIterator.java

@@ -26,7 +26,6 @@ import java.util.Iterator;
  * This class provides an implementation of ResetableIterator. The
  * implementation will be based on ArrayList.
  * 
- * @author runping
  * 
  */
 public class ArrayListBackedIterator implements ResetableIterator {

+ 0 - 2
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/DataJoinJob.java

@@ -39,8 +39,6 @@ import org.apache.hadoop.mapred.TextOutputFormat;
  * user must implement a mapper class that extends DataJoinMapperBase class,
  * and a reducer class that extends DataJoinReducerBase. 
  * 
- * @author runping
- *
  */
 public class DataJoinJob {
 

+ 0 - 1
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java

@@ -27,7 +27,6 @@ import java.util.Iterator;
 * according to their source tags. Once the values are re-grouped, the reducer can
  * perform the cross product over the values in different groups.
  * 
- * @author runping
  * 
  */
 public interface ResetableIterator extends Iterator {

+ 0 - 2
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/TaggedMapOutput.java

@@ -29,8 +29,6 @@ import org.apache.hadoop.io.Writable;
  * file name of the input file. This tag will be used by the reducers
  * to re-group the values of a given key according to their source tags.
  * 
- * @author runping
- *
  */
 public abstract class TaggedMapOutput implements Writable {
   protected Text tag;

+ 0 - 1
src/contrib/hbase/src/java/org/apache/hadoop/hbase/HColumnDescriptor.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.io.WritableComparable;
 /**
  * A HColumnDescriptor contains information about a column family such as the
  * number of versions, compression settings, etc.
- *
  */
 public class HColumnDescriptor implements WritableComparable {
   

+ 0 - 1
src/contrib/hbase/src/java/org/onelab/filter/BloomFilter.java

@@ -46,7 +46,6 @@ import java.io.IOException;
  * filter will occasionally return a false positive, it will never return a false negative. When creating 
  * the filter, the sender can choose its desired point in a trade-off between the false positive rate and the size. 
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 2 Feb. 07
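
The false-positive/size trade-off described in this javadoc is easy to sketch. Below is a minimal, hypothetical SimpleBloomFilter (not the org.onelab.filter implementation); it derives its k probe positions from two base hashes, a common construction:

    import java.util.Arrays;
    import java.util.BitSet;

    /** Minimal illustrative Bloom filter: k hash probes over an m-bit vector. */
    public class SimpleBloomFilter {
      private final BitSet bits;
      private final int m;   // vector size in bits
      private final int k;   // number of hash functions

      public SimpleBloomFilter(int m, int k) {
        this.bits = new BitSet(m);
        this.m = m;
        this.k = k;
      }

      /** Derive the i-th probe position from two base hashes of the key. */
      private int probe(byte[] key, int i) {
        int h1 = Arrays.hashCode(key);
        int h2 = Integer.reverse(h1) | 1;   // cheap second hash, forced odd
        int idx = (h1 + i * h2) % m;
        return idx < 0 ? idx + m : idx;
      }

      public void add(byte[] key) {
        for (int i = 0; i < k; i++) {
          bits.set(probe(key, i));
        }
      }

      /** May return a false positive, but never a false negative. */
      public boolean membershipTest(byte[] key) {
        for (int i = 0; i < k; i++) {
          if (!bits.get(probe(key, i))) {
            return false;
          }
        }
        return true;
      }
    }

Growing m lowers the false-positive rate for a given number of keys, which is exactly the trade-off the javadoc lets the sender choose.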

+ 0 - 1
src/contrib/hbase/src/java/org/onelab/filter/CountingBloomFilter.java

@@ -42,7 +42,6 @@ import java.io.IOException;
  * allows dynamic additions and deletions of set membership information.  This 
  * is achieved through the use of a counting vector instead of a bit vector.
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 5 Feb. 07
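
The counting variant described here swaps the bit vector for small counters, which is what makes deletion possible. Again a hypothetical sketch, not the org.onelab.filter code:

    /** Counter vector instead of a bit vector, so entries can also be deleted. */
    public class SimpleCountingBloomFilter {
      private final byte[] counters;
      private final int k;

      public SimpleCountingBloomFilter(int m, int k) {
        this.counters = new byte[m];
        this.k = k;
      }

      private int probe(byte[] key, int i) {
        int h = java.util.Arrays.hashCode(key);
        int idx = (h + i * (Integer.reverse(h) | 1)) % counters.length;
        return idx < 0 ? idx + counters.length : idx;
      }

      public void add(byte[] key) {
        for (int i = 0; i < k; i++) {
          int p = probe(key, i);
          if (counters[p] != Byte.MAX_VALUE) counters[p]++;   // saturate, never wrap
        }
      }

      public void delete(byte[] key) {
        for (int i = 0; i < k; i++) {
          int p = probe(key, i);
          if (counters[p] > 0) counters[p]--;
        }
      }

      public boolean membershipTest(byte[] key) {
        for (int i = 0; i < k; i++) {
          if (counters[probe(key, i)] == 0) return false;
        }
        return true;
      }
    }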

+ 0 - 1
src/contrib/hbase/src/java/org/onelab/filter/DynamicBloomFilter.java

@@ -58,7 +58,6 @@ import java.io.IOException;
  * this new Bloom filter is set to one.  A given key is said to belong to the
  * DBF if the <code>k</code> positions are set to one in one of the matrix rows.
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 6 Feb. 07

+ 0 - 1
src/contrib/hbase/src/java/org/onelab/filter/RemoveScheme.java

@@ -35,7 +35,6 @@ package org.onelab.filter;
 /**
  * Defines the different remove scheme for retouched Bloom filters.
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 7 Feb. 07

+ 0 - 1
src/contrib/hbase/src/java/org/onelab/filter/RetouchedBloomFilter.java

@@ -44,7 +44,6 @@ import java.util.Random;
  * random false negatives, and with the benefit of eliminating some random false
  * positives at the same time.
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 7 Feb. 07

+ 0 - 1
src/contrib/hbase/src/test/org/onelab/test/StringKey.java

@@ -38,7 +38,6 @@ import org.onelab.filter.Key;
  * <p>
  * It gives an example on how to extend Key.
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 5 Feb. 07

+ 0 - 1
src/contrib/hbase/src/test/org/onelab/test/TestFilter.java

@@ -38,7 +38,6 @@ import org.onelab.filter.*;
 /**
  * Test class.
  * 
- * @author <a href="mailto:donnet@ucl.ac.be">Benoit Donnet</a> - Universite Catholique de Louvain - Faculte des Sciences Appliquees - Departement d'Ingenierie Informatique.
  * contract <a href="http://www.one-lab.org">European Commission One-Lab Project 034819</a>.
  *
  * @version 1.0 - 8 Feb. 07

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/Environment.java

@@ -27,7 +27,6 @@ import java.util.*;
  * on the host machines running the map/reduce. This class
  * assumes that setting the environment in streaming is 
 * allowed on windows/aix/linux/freebsd/sunos/solaris/hp-ux
- * @author michel
  */
 public class Environment extends Properties {
 

+ 0 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/JarBuilder.java

@@ -30,8 +30,6 @@ import java.util.zip.ZipException;
 * hadoop-streaming is a user level application, so all the classes
  * with hadoop-streaming that are needed in the job are also included
  * in the job.jar.
- * @author michel
- *
  */
 public class JarBuilder {
 

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PathFinder.java

@@ -24,7 +24,6 @@ import java.util.*;
 /**
  * Maps a relative pathname to an absolute pathname using the
 * PATH environment.
- * @author Dhruba Borthakur
  */
 public class PathFinder
 {

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapRed.java

@@ -43,7 +43,6 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.fs.FileSystem;
 
 /** Shared functionality for PipeMapper, PipeReducer.
- *  @author Michel Tourn
  */
 public abstract class PipeMapRed {
 

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeMapper.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.io.Writable;
 
 /** A generic Mapper bridge.
  *  It delegates operations to an external program via stdin and stdout.
- *  @author Michel Tourn
  */
 public class PipeMapper extends PipeMapRed implements Mapper {
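
The "bridge" this javadoc describes is plain pipe plumbing: write records to the child process's stdin, read records back from its stdout. A self-contained sketch of that loop (illustrative only; /bin/cat stands in for the user's mapper, and PipeMapper itself adds buffering, framing, and error handling on top):

    import java.io.BufferedReader;
    import java.io.BufferedWriter;
    import java.io.InputStreamReader;
    import java.io.OutputStreamWriter;

    public class PipeSketch {
      public static void main(String[] args) throws Exception {
        Process child = new ProcessBuilder("/bin/cat").start();
        BufferedWriter toChild = new BufferedWriter(
            new OutputStreamWriter(child.getOutputStream(), "UTF-8"));
        BufferedReader fromChild = new BufferedReader(
            new InputStreamReader(child.getInputStream(), "UTF-8"));

        toChild.write("key1\tvalue1\n");   // records go out as tab-separated lines
        toChild.close();                   // closing stdin signals end of input

        String line;
        while ((line = fromChild.readLine()) != null) {
          System.out.println("got: " + line);   // each line read back is one record
        }
        child.waitFor();
      }
    }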
 

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/PipeReducer.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.io.Writable;
 
 /** A generic Reducer bridge.
  *  It delegates operations to an external program via stdin and stdout.
- *  @author Michel Tourn
  */
 public class PipeReducer extends PipeMapRed implements Reducer {
 

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamBaseRecordReader.java

@@ -38,7 +38,6 @@ import org.apache.commons.logging.*;
  * and is selected with the option bin/hadoopStreaming -inputreader ...
  * @see StreamLineRecordReader
  * @see StreamXmlRecordReader 
- * @author Michel Tourn
  */
 public abstract class StreamBaseRecordReader implements RecordReader {
 

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamJob.java

@@ -66,7 +66,6 @@ import org.apache.hadoop.util.*;
 
 /** All the client-side work happens here.
  * (Jar packaging, MapRed job submission and monitoring)
- * @author Michel Tourn
  */
 public class StreamJob {
 

+ 0 - 2
src/contrib/streaming/src/java/org/apache/hadoop/streaming/StreamXmlRecordReader.java

@@ -41,8 +41,6 @@ import org.apache.hadoop.mapred.JobConf;
  *    int maxrec   (maximum record size)
  *    int lookahead(maximum lookahead to sync CDATA)
  *    boolean slowmatch
- *
- *  @author Michel Tourn
  */
 public class StreamXmlRecordReader extends StreamBaseRecordReader {
 

+ 0 - 1
src/contrib/streaming/src/java/org/apache/hadoop/streaming/UTF8ByteArrayUtils.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.mapred.LineRecordReader;
 
 /**
 * General utils for byte arrays containing UTF-8 encoded strings
- * @author hairong 
  */
 
 public class UTF8ByteArrayUtils {

+ 0 - 1
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestStreamedMerge.java

@@ -48,7 +48,6 @@ import org.apache.hadoop.fs.Path;
  *     [-Dhadoop.test.localoutputfile=/tmp/fifo] \ 
  *     test-unix 
  * </pre>
- * @author michel
  */
 public class TestStreamedMerge extends TestCase {
 

+ 0 - 2
src/contrib/streaming/src/test/org/apache/hadoop/streaming/TestSymLink.java

@@ -29,8 +29,6 @@ import org.apache.hadoop.dfs.MiniDFSCluster;
 /**
  * This test case tests the symlink creation
  * utility provided by distributed caching 
- * @author mahadev
- *
  */
 public class TestSymLink extends TestCase
 {

+ 0 - 1
src/examples/org/apache/hadoop/examples/ExampleDriver.java

@@ -23,7 +23,6 @@ import org.apache.hadoop.examples.dancing.*;
 /**
  * A description of an example program based on its class and a 
  * human-readable description.
- * @author Owen O'Malley
  */
 public class ExampleDriver {
   

+ 0 - 2
src/examples/org/apache/hadoop/examples/PiEstimator.java

@@ -34,8 +34,6 @@ import org.apache.hadoop.mapred.*;
 /**
 * A Map-reduce program to estimate the value of Pi using the Monte Carlo
 * method.
- *
- * @author Milind Bhandarkar
  */
 public class PiEstimator {
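
The Monte Carlo method this example distributes fits in a few lines when run sequentially: sample random points in the unit square, and the fraction that lands inside the quarter circle approaches pi/4. A sketch of the core calculation (in the real job, each map does a share of the samples and the reduce sums the counts):

    import java.util.Random;

    public class PiSketch {
      public static void main(String[] args) {
        Random rand = new Random(0);
        long samples = 1000000;
        long inside = 0;
        for (long i = 0; i < samples; i++) {
          double x = rand.nextDouble();
          double y = rand.nextDouble();
          if (x * x + y * y <= 1.0) {
            inside++;       // point fell inside the quarter circle
          }
        }
        System.out.println("pi is approximately " + 4.0 * inside / samples);
      }
    }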
   

+ 0 - 2
src/examples/org/apache/hadoop/examples/Sort.java

@@ -33,8 +33,6 @@ import org.apache.hadoop.fs.*;
  *
  * To run: bin/hadoop jar build/hadoop-examples.jar sort
  *            [-m <i>maps</i>] [-r <i>reduces</i>] <i>in-dir</i> <i>out-dir</i> 
- *
- * @author Owen O'Malley
  */
 public class Sort {
   

+ 0 - 2
src/examples/org/apache/hadoop/examples/WordCount.java

@@ -42,8 +42,6 @@ import org.apache.hadoop.mapred.MapReduceBase;
  *
  * To run: bin/hadoop jar build/hadoop-examples.jar wordcount
  *            [-m <i>maps</i>] [-r <i>reduces</i>] <i>in-dir</i> <i>out-dir</i> 
- *
- * @author Owen O'Malley
  */
 public class WordCount {
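
For readers who have not seen the example itself, the heart of WordCount is a mapper that emits <word, 1> pairs and a reducer that sums them. A condensed sketch against the org.apache.hadoop.mapred API of this era (class names here are illustrative; see the real WordCount.java for the job setup):

    import java.io.IOException;
    import java.util.Iterator;
    import java.util.StringTokenizer;

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.mapred.MapReduceBase;
    import org.apache.hadoop.mapred.Mapper;
    import org.apache.hadoop.mapred.OutputCollector;
    import org.apache.hadoop.mapred.Reducer;
    import org.apache.hadoop.mapred.Reporter;

    public class WordCountSketch {
      public static class MapClass extends MapReduceBase
          implements Mapper<LongWritable, Text, Text, IntWritable> {
        private final IntWritable one = new IntWritable(1);
        private final Text word = new Text();

        public void map(LongWritable key, Text value,
                        OutputCollector<Text, IntWritable> output,
                        Reporter reporter) throws IOException {
          StringTokenizer itr = new StringTokenizer(value.toString());
          while (itr.hasMoreTokens()) {
            word.set(itr.nextToken());
            output.collect(word, one);            // emit <word, 1> per token
          }
        }
      }

      public static class Reduce extends MapReduceBase
          implements Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values,
                           OutputCollector<Text, IntWritable> output,
                           Reporter reporter) throws IOException {
          int sum = 0;
          while (values.hasNext()) {
            sum += values.next().get();
          }
          output.collect(key, new IntWritable(sum));   // total per word
        }
      }
    }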
   

+ 0 - 1
src/java/org/apache/hadoop/HadoopVersionAnnotation.java

@@ -21,7 +21,6 @@ import java.lang.annotation.*;
 
 /**
  * A package attribute that captures the version of Hadoop that was compiled.
- * @author Owen O'Malley
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target(ElementType.PACKAGE)

+ 0 - 1
src/java/org/apache/hadoop/dfs/AlreadyBeingCreatedException.java

@@ -5,7 +5,6 @@ import java.io.IOException;
 /**
  * The exception that happens when you ask to create a file that already
  * is being created, but is not closed yet.
- * @author Owen O'Malley
  */
 public class AlreadyBeingCreatedException extends IOException {
   public AlreadyBeingCreatedException(String msg) {

+ 0 - 1
src/java/org/apache/hadoop/dfs/Block.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.io.*;
  * A Block is a Hadoop FS primitive, identified by a 
  * long.
  *
- * @author Mike Cafarella
  **************************************************/
 class Block implements Writable, Comparable {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/BlockCommand.java

@@ -59,7 +59,6 @@ class DatanodeCommand implements Writable {
  * blocks, or to copy a set of indicated blocks to 
  * another DataNode.
  * 
- * @author Mike Cafarella
  ****************************************************/
 class BlockCommand extends DatanodeCommand {
   Block blocks[];

+ 0 - 3
src/java/org/apache/hadoop/dfs/ClientProtocol.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  * with the NameNode.  User code can manipulate the directory namespace, 
  * as well as open/close file streams, etc.
  *
- * @author Mike Cafarella
  **********************************************************************/
 interface ClientProtocol extends VersionedProtocol {
 
@@ -114,7 +113,6 @@ interface ClientProtocol extends VersionedProtocol {
    * @throws IOException
    * @return true if successful;
    *         false if file does not exist or is a directory
-   * @author shv
    */
   public boolean setReplication(String src, 
                                 short replication
@@ -324,7 +322,6 @@ interface ClientProtocol extends VersionedProtocol {
    * @return <ul><li>0 if the safe mode is OFF or</li> 
    *         <li>1 if the safe mode is ON.</li></ul>
    * @throws IOException
-   * @author Konstantin Shvachko
    */
   public boolean setSafeMode(FSConstants.SafeModeAction action) throws IOException;
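
The setReplication contract documented above is usually exercised through the public FileSystem API rather than ClientProtocol directly. An illustrative usage sketch (the path is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class SetReplicationSketch {
      public static void main(String[] args) throws Exception {
        FileSystem fs = FileSystem.get(new Configuration());
        // false if the file does not exist or is a directory,
        // mirroring the contract documented on setReplication above
        boolean ok = fs.setReplication(new Path("/user/demo/data.txt"), (short) 3);
        System.out.println("replication changed: " + ok);
      }
    }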
 

+ 0 - 2
src/java/org/apache/hadoop/dfs/DFSAdmin.java

@@ -26,8 +26,6 @@ import org.apache.hadoop.ipc.RPC;
 
 /**
  * This class provides some DFS administrative access.
- *
- * @author Dhruba Borthakur
  */
 public class DFSAdmin extends FsShell {
 

+ 0 - 3
src/java/org/apache/hadoop/dfs/DFSClient.java

@@ -43,7 +43,6 @@ import java.util.concurrent.TimeUnit;
  * DistributedFileSystem, which uses DFSClient to handle
  * filesystem tasks.
  *
- * @author Mike Cafarella, Tessa MacDuff
  ********************************************************/
 class DFSClient implements FSConstants {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.DFSClient");
@@ -70,7 +69,6 @@ class DFSClient implements FSConstants {
   /**
    * A class to track the list of DFS clients, so that they can be closed
    * on exit.
-   * @author Owen O'Malley
    */
   private static class ClientFinalizer extends Thread {
     private List<DFSClient> clients = new ArrayList<DFSClient>();
@@ -355,7 +353,6 @@ class DFSClient implements FSConstants {
    * @param replication
    * @throws IOException
   * @return true if successful or false if file does not exist 
-   * @author shv
    */
   public boolean setReplication(UTF8 src, 
                                 short replication

+ 0 - 1
src/java/org/apache/hadoop/dfs/DFSFileInfo.java

@@ -29,7 +29,6 @@ import java.io.*;
  * Includes partial information about its blocks.
  * Block locations are sorted by the distance to the current client.
  * 
- * @author Mike Cafarella
  ******************************************************/
 class DFSFileInfo implements Writable {
   static {                                      // register a ctor

+ 0 - 1
src/java/org/apache/hadoop/dfs/DFSck.java

@@ -52,7 +52,6 @@ import org.apache.hadoop.util.ToolBase;
  *  optionally can print detailed statistics on block locations and replication
  *  factors of each file.
  *  
- * @author Andrzej Bialecki
  */
 public class DFSck extends ToolBase {
   private static final Log LOG = LogFactory.getLog(DFSck.class.getName());

+ 0 - 1
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -68,7 +68,6 @@ import org.apache.hadoop.metrics.Updater;
  * this server is reported to the NameNode, which then sends that
  * information to clients or other DataNodes that might be interested.
  *
- * @author Mike Cafarella
  **********************************************************/
 public class DataNode implements FSConstants, Runnable {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.DataNode");

+ 0 - 1
src/java/org/apache/hadoop/dfs/DataStorage.java

@@ -18,7 +18,6 @@ import org.apache.hadoop.fs.FileUtil.HardLink;
  * Data storage information file.
  * <p>
  * @see Storage
- * @author Konstantin Shvachko
  */
 class DataStorage extends Storage {
   // Constants

+ 0 - 2
src/java/org/apache/hadoop/dfs/DatanodeDescriptor.java

@@ -32,8 +32,6 @@ import org.apache.hadoop.net.Node;
 * or the Datanodes. Neither is it stored persistently in the
  * fsImage.
 
- * @author Mike Cafarella
- * @author Konstantin Shvachko
  **************************************************/
 public class DatanodeDescriptor extends DatanodeInfo {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/DatanodeID.java

@@ -12,7 +12,6 @@ import org.apache.hadoop.io.WritableComparable;
  * name (hostname:portNumber) and the data storage ID, 
  * which it currently represents.
  * 
- * @author Konstantin Shvachko
  */
 public class DatanodeID implements WritableComparable {
 

+ 0 - 3
src/java/org/apache/hadoop/dfs/DatanodeInfo.java

@@ -37,9 +37,6 @@ import org.apache.hadoop.net.NodeBase;
  * DatanodeInfo represents the status of a DataNode.
  * This object is used for communication in the
  * Datanode Protocol and the Client Protocol.
- *
- * @author Mike Cafarella
- * @author Konstantin Shvachko
  */
 public class DatanodeInfo extends DatanodeID implements Node {
   protected long capacity;

+ 0 - 1
src/java/org/apache/hadoop/dfs/DatanodeProtocol.java

@@ -28,7 +28,6 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  * The only way a NameNode can communicate with a DataNode is by
  * returning values from these functions.
  *
- * @author Michael Cafarella
  **********************************************************************/
 interface DatanodeProtocol extends VersionedProtocol {
   /*

+ 0 - 1
src/java/org/apache/hadoop/dfs/DatanodeRegistration.java

@@ -14,7 +14,6 @@ import org.apache.hadoop.io.WritableFactory;
  * to identify and verify a Datanode when it contacts the Namenode.
  * This information is sent by Datanode with each communication request.
  * 
- * @author Konstantin Shvachko
  */
 class DatanodeRegistration extends DatanodeID implements Writable {
   static {                                      // register a ctor

+ 0 - 1
src/java/org/apache/hadoop/dfs/DisallowedDatanodeException.java

@@ -8,7 +8,6 @@ import java.io.IOException;
  * with the namenode when it does not appear on the list of included nodes, 
  * or has been specifically excluded.
  * 
- * @author Wendy Chien
  */
 class DisallowedDatanodeException extends IOException {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/DistributedFileSystem.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.util.*;
  * This object is the way end-user code interacts with a Hadoop
  * DistributedFileSystem.
  *
- * @author Mike Cafarella
  *****************************************************************/
 public class DistributedFileSystem extends ChecksumFileSystem {
   private static class RawDistributedFileSystem extends FileSystem {

+ 0 - 1
src/java/org/apache/hadoop/dfs/FSConstants.java

@@ -22,7 +22,6 @@ import org.apache.hadoop.conf.Configuration;
 /************************************
  * Some handy constants
  *
- * @author Mike Cafarella
  ************************************/
 public interface FSConstants {
   public static int MIN_BLOCKS_FOR_WRITE = 5;

+ 0 - 3
src/java/org/apache/hadoop/dfs/FSDataset.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.conf.*;
  * FSDataset manages a set of data blocks.  Each block
  * has a unique name and an extent on disk.
  *
- * @author Mike Cafarella
  ***************************************************/
 class FSDataset implements FSConstants {
 
@@ -179,7 +178,6 @@ class FSDataset implements FSConstants {
     /**
     * check if a data directory is healthy
      * @throws DiskErrorException
-     * @author hairong
      */
     public void checkDirTree() throws DiskErrorException {
       DiskChecker.checkDir(dir);
@@ -651,7 +649,6 @@ class FSDataset implements FSConstants {
   /**
   * check if a data directory is healthy
    * @throws DiskErrorException
-   * @author hairong
    */
   void checkDataDir() throws DiskErrorException {
     volumes.checkDirs();

+ 0 - 2
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.metrics.MetricsContext;
  * It keeps the filename->blockset mapping always-current
  * and logged to disk.
  * 
- * @author Mike Cafarella
  *************************************************/
 class FSDirectory implements FSConstants {
 
@@ -180,7 +179,6 @@ class FSDirectory implements FSConstants {
      * @param newNode INode to be added
      * @return null if the node already exists; inserted INode, otherwise
      * @throws FileNotFoundException 
-     * @author shv
      */
     INode addNode(String path, INode newNode) throws FileNotFoundException {
       File target = new File(path);

+ 0 - 1
src/java/org/apache/hadoop/dfs/FSEditLog.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.io.Writable;
 /**
  * FSEditLog maintains a log of the namespace modifications.
  * 
- * @author Konstantin Shvachko
  */
 class FSEditLog {
   private static final byte OP_ADD = 0;

+ 0 - 1
src/java/org/apache/hadoop/dfs/FSImage.java

@@ -45,7 +45,6 @@ import org.apache.hadoop.io.WritableComparable;
 /**
  * FSImage handles checkpointing and logging of the namespace edits.
  * 
- * @author Konstantin Shvachko
  */
 class FSImage extends Storage {
 

+ 0 - 6
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -516,7 +516,6 @@ class FSNamesystem implements FSConstants {
    * @param replication new replication
    * @return true if successful; 
    *         false if file does not exist or is a directory
-   * @author shv
    */
   public boolean setReplication(String src, short replication) 
                                 throws IOException {
@@ -1482,7 +1481,6 @@ class FSNamesystem implements FSConstants {
    * registered with the namenode without restarting the whole cluster.
    * 
    * @see DataNode#register()
-   * @author Konstantin Shvachko
    */
   public void registerDatanode(DatanodeRegistration nodeReg,
                                String networkLocation
@@ -1875,7 +1873,6 @@ class FSNamesystem implements FSConstants {
   /**
    * remove a datanode descriptor
    * @param nodeID datanode ID
-   * @author hairong
    */
   synchronized public void removeDatanode(DatanodeID nodeID) 
     throws IOException {
@@ -1891,7 +1888,6 @@ class FSNamesystem implements FSConstants {
   /**
    * remove a datanode descriptor
    * @param nodeInfo datanode descriptor
-   * @author hairong
    */
   private void removeDatanode(DatanodeDescriptor nodeInfo) {
     synchronized (heartbeats) {
@@ -3021,7 +3017,6 @@ class FSNamesystem implements FSConstants {
    *
    * @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)
    * @see SafeModeMonitor
-   * @author Konstantin Shvachko
    */
   class SafeModeInfo {
     // configuration fields
@@ -3251,7 +3246,6 @@ class FSNamesystem implements FSConstants {
    * Periodically check whether it is time to leave safe mode.
    * This thread starts when the threshold level is reached.
    *
-   * @author Konstantin Shvachko
    */
   class SafeModeMonitor implements Runnable {
     /** interval in msec for checking safe mode: {@value} */

+ 0 - 1
src/java/org/apache/hadoop/dfs/FileUnderConstruction.java

@@ -28,7 +28,6 @@ import java.util.*;
 * This class contains a <code>Collection</code> of blocks that have
  * been written into the file so far, and file replication. 
  * 
- * @author shv
  */
 class FileUnderConstruction {
   private short blockReplication; // file replication

+ 0 - 1
src/java/org/apache/hadoop/dfs/FsckServlet.java

@@ -31,7 +31,6 @@ import javax.servlet.http.HttpServletResponse;
 
 /**
  * This class is used in Namesystem's jetty to do fsck on namenode.
- * @author Milind Bhandarkar
  */
 public class FsckServlet extends HttpServlet {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/GetImageServlet.java

@@ -34,7 +34,6 @@ import javax.servlet.http.HttpServletResponse;
  * This class is used in Namesystem's jetty to retrieve a file.
  * Typically used by the Secondary NameNode to retrieve image and
  * edit file for periodic checkpointing.
- * @author Dhruba Borthakur
  */
 public class GetImageServlet extends HttpServlet {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/InconsistentFSStateException.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.util.StringUtils;
  * The exception is thrown when file system state is inconsistent 
  * and is not recoverable. 
  * 
- * @author Konstantin Shvachko
  */
 class InconsistentFSStateException extends IOException {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/IncorrectVersionException.java

@@ -23,7 +23,6 @@ import java.io.IOException;
  * The exception is thrown when external version does not match 
 * current version of the application.
  * 
- * @author Konstantin Shvachko
  */
 class IncorrectVersionException extends IOException {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/LeaseExpiredException.java

@@ -4,7 +4,6 @@ import java.io.IOException;
 
 /**
  * The lease that was being used to create this file has expired.
- * @author Owen O'Malley
  */
 public class LeaseExpiredException extends IOException {
   public LeaseExpiredException(String msg) {

+ 0 - 1
src/java/org/apache/hadoop/dfs/LocatedBlock.java

@@ -25,7 +25,6 @@ import java.io.*;
  * A LocatedBlock is a pair of Block, DatanodeInfo[]
  * objects.  It tells where to find a Block.
  * 
- * @author Michael Cafarella
  ****************************************************/
 class LocatedBlock implements Writable {
 

+ 0 - 1
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -66,7 +66,6 @@ import org.apache.hadoop.metrics.Updater;
  * methods are invoked repeatedly and automatically by all the
  * DataNodes in a DFS deployment.
  *
- * @author Mike Cafarella
  **********************************************************/
 public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
   public long getProtocolVersion(String protocol, 

+ 0 - 3
src/java/org/apache/hadoop/dfs/NamenodeFsck.java

@@ -59,8 +59,6 @@ import org.apache.hadoop.io.UTF8;
 *  Additionally, the tool collects detailed overall DFS statistics, and
  *  optionally can print detailed statistics on block locations and replication
  *  factors of each file.
- *
- * @author Andrzej Bialecki
  */
 public class NamenodeFsck {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.NameNode");
@@ -462,7 +460,6 @@ public class NamenodeFsck {
   /**
    * FsckResult of checking, plus overall DFS statistics.
    *
-   * @author Andrzej Bialecki
    */
   public static class FsckResult {
     private ArrayList<String> missingIds = new ArrayList<String>();

+ 0 - 1
src/java/org/apache/hadoop/dfs/NamespaceInfo.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.io.WritableFactory;
  * NamespaceInfo is returned by the name-node in reply 
  * to a data-node handshake.
  * 
- * @author Konstantin Shvachko
  */
 class NamespaceInfo extends StorageInfo implements Writable {
   String  buildVersion;

+ 0 - 1
src/java/org/apache/hadoop/dfs/NotReplicatedYetException.java

@@ -4,7 +4,6 @@ import java.io.IOException;
 
 /**
  * The file has not finished being written to enough datanodes yet.
- * @author Owen O'Malley
  */
 public class NotReplicatedYetException extends IOException {
   public NotReplicatedYetException(String msg) {

+ 0 - 1
src/java/org/apache/hadoop/dfs/PendingCreates.java

@@ -31,7 +31,6 @@ import java.util.*;
  *     Mapping: fileName -> FileUnderConstruction
  * 2)  a global set of all blocks that are part of all pending files.
  *
- * @author Dhruba Borthakur
  ***************************************************/
 class PendingCreates {
   private Map<UTF8, FileUnderConstruction> pendingCreates =

+ 0 - 1
src/java/org/apache/hadoop/dfs/PendingReplicationBlocks.java

@@ -33,7 +33,6 @@ import java.sql.Time;
  * 3)  a thread that periodically identifies replication-requests
  *     that never made it.
  *
- * @author Dhruba Borthakur
  ***************************************************/
 class PendingReplicationBlocks {
   private Log LOG = null;

+ 0 - 2
src/java/org/apache/hadoop/dfs/ReplicationTargetChooser.java

@@ -29,8 +29,6 @@ import java.util.*;
  * otherwise a random datanode. The 2nd replica is placed on a datanode
  * that is on a different rack. The 3rd replica is placed on a datanode
 * which is on the same rack as the first replica.
- * @author hairong
- *
  */
 class ReplicationTargetChooser {
   private final boolean considerLoad; 
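
Reduced to its skeleton, the placement rule in this javadoc is a pair of list partitions and random picks. The sketch below is not the ReplicationTargetChooser code (which also weighs load and free space); it only illustrates the rack logic described above:

    import java.util.ArrayList;
    import java.util.List;
    import java.util.Random;

    public class PlacementSketch {
      static class Node {
        final String name;
        final String rack;
        Node(String name, String rack) { this.name = name; this.rack = rack; }
      }

      /** Choose up to three targets: writer's node, a remote rack, writer's rack. */
      static List<Node> chooseTargets(Node writer, List<Node> cluster, Random rand) {
        List<Node> targets = new ArrayList<Node>();
        targets.add(writer);                       // 1st replica: the local datanode
        List<Node> remoteRack = new ArrayList<Node>();
        List<Node> localRack = new ArrayList<Node>();
        for (Node n : cluster) {
          if (n == writer) continue;
          if (n.rack.equals(writer.rack)) localRack.add(n);
          else remoteRack.add(n);
        }
        if (!remoteRack.isEmpty())                 // 2nd replica: a different rack
          targets.add(remoteRack.get(rand.nextInt(remoteRack.size())));
        if (!localRack.isEmpty())                  // 3rd replica: same rack as the 1st
          targets.add(localRack.get(rand.nextInt(localRack.size())));
        return targets;
      }
    }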

+ 0 - 1
src/java/org/apache/hadoop/dfs/SafeModeException.java

@@ -6,7 +6,6 @@ import java.io.IOException;
  * This exception is thrown when the name node is in safe mode.
 * Clients cannot modify the namespace until the safe mode is off. 
  * 
- * @author Konstantin Shvachko
  */
 public class SafeModeException extends IOException {
 

+ 0 - 2
src/java/org/apache/hadoop/dfs/SecondaryNameNode.java

@@ -46,7 +46,6 @@ import javax.servlet.http.HttpServletResponse;
  * The Secondary NameNode uses the ClientProtocol to talk to the
  * primary NameNode.
  *
- * @author  Dhruba Borthakur
  **********************************************************/
 public class SecondaryNameNode implements FSConstants, Runnable {
     
@@ -428,7 +427,6 @@ public class SecondaryNameNode implements FSConstants, Runnable {
    * This class is used in Namesystem's jetty to retrieve a file.
    * Typically used by the Secondary NameNode to retrieve image and
    * edit file for periodic checkpointing.
-   * @author Dhruba Borthakur
    */
   public static class GetImageServlet extends HttpServlet {
     @SuppressWarnings("unchecked")

+ 0 - 2
src/java/org/apache/hadoop/dfs/Storage.java

@@ -40,7 +40,6 @@ import org.apache.hadoop.util.VersionInfo;
  * Common class for storage information.
  * 
  * TODO namespaceID should be long and computed as hash(address + port)
- * @author Konstantin Shvachko
  */
 class StorageInfo {
   int   layoutVersion;  // Version read from the stored file.
@@ -85,7 +84,6 @@ class StorageInfo {
  * other nodes were not able to startup sharing the same storage.
  * The locks are released when the servers stop (normally or abnormally).
  * 
- * @author Konstantin Shvachko
  */
 abstract class Storage extends StorageInfo {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.dfs.Storage");

+ 0 - 1
src/java/org/apache/hadoop/dfs/TransferFsImage.java

@@ -26,7 +26,6 @@ import javax.servlet.http.HttpServletRequest;
 
 /**
 * This class fetches a specified file from the NameNode.
- * @author Dhruba Borthakur
  */
 class TransferFsImage implements FSConstants {
   

+ 0 - 1
src/java/org/apache/hadoop/dfs/UnregisteredDatanodeException.java

@@ -7,7 +7,6 @@ import java.io.IOException;
  * This exception is thrown when a datanode that has not previously 
  * registered is trying to access the name node.
  * 
- * @author Konstantin Shvachko
  */
 class UnregisteredDatanodeException extends IOException {
 

+ 0 - 1
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -33,7 +33,6 @@ import java.net.URI;
 * The DistributedCache maintains all the caching information for cached archives,
 * unarchives the files as needed, and returns the path
  * 
- * @author Mahadev Konar
  ******************************************************************************/
 public class DistributedCache {
   // cacheID to cacheStatus mapping
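
In job code, the cache described here is driven through a couple of static calls. An illustrative sketch assuming the addCacheFile/addCacheArchive helpers (the URIs are hypothetical):

    import java.net.URI;
    import org.apache.hadoop.filecache.DistributedCache;
    import org.apache.hadoop.mapred.JobConf;

    public class CacheSketch {
      public static void configure(JobConf job) throws Exception {
        // register a DFS file; tasks read it from a local copy
        DistributedCache.addCacheFile(new URI("/user/demo/lookup.dat"), job);
        // archives are unarchived on the task node, as the javadoc says
        DistributedCache.addCacheArchive(new URI("/user/demo/dict.zip"), job);
      }
    }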

+ 0 - 1
src/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -35,7 +35,6 @@ import org.apache.hadoop.util.StringUtils;
  * which creates a checksum file for each raw file.
  * It generates & verifies checksums at the client side.
  *
- * @author Hairong Kuang
  *****************************************************************/
 public abstract class ChecksumFileSystem extends FilterFileSystem {
   private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};

+ 0 - 1
src/java/org/apache/hadoop/fs/FSInputStream.java

@@ -23,7 +23,6 @@ import java.io.*;
  * FSInputStream is a generic old InputStream with a little bit
  * of RAF-style seek ability.
  *
- * @author Mike Cafarella
  *****************************************************************/
 public abstract class FSInputStream extends InputStream
     implements Seekable, PositionedReadable {

+ 0 - 1
src/java/org/apache/hadoop/fs/FileSystem.java

@@ -45,7 +45,6 @@ import org.apache.hadoop.util.*;
  * <p>
  * The local implementation is {@link LocalFileSystem} and distributed
  * implementation is {@link DistributedFileSystem}.
- * @author Mike Cafarella
  *****************************************************************/
 public abstract class FileSystem extends Configured {
   public static final Log LOG = LogFactory.getLog("org.apache.hadoop.fs.FileSystem");

+ 0 - 1
src/java/org/apache/hadoop/fs/FileUtil.java

@@ -345,7 +345,6 @@ public class FileUtil {
    * Class for creating hardlinks.
   * Supports Unix, Cygwin, Windows XP.
    *  
-   * @author Konstantin Shvachko
    */
   public static class HardLink { 
     enum OSType {

+ 0 - 1
src/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -38,7 +38,6 @@ import org.apache.hadoop.util.Progressable;
  * and may also provide additional methods
  * and fields.
  *
- * @author Hairong Kuang
  *****************************************************************/
 public class FilterFileSystem extends FileSystem {
   

+ 0 - 2
src/java/org/apache/hadoop/fs/InMemoryFileSystem.java

@@ -33,8 +33,6 @@ import org.apache.hadoop.util.Progressable;
  * reserveSpaceWithCheckSum(Path f, int size) (see below for a description of
 * the API for reserving space in the FS). The uri of this filesystem starts with
  * ramfs:// .
- * @author ddas
- *
  */
 public class InMemoryFileSystem extends ChecksumFileSystem {
   private static class RawInMemoryFileSystem extends FileSystem {

+ 0 - 1
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -25,7 +25,6 @@ import java.util.*;
 /****************************************************************
 * Implement the FileSystem API for the checksummed local filesystem.
  *
- * @author Mike Cafarella
  *****************************************************************/
 public class LocalFileSystem extends ChecksumFileSystem {
   static final URI NAME = URI.create("file:///");

+ 0 - 1
src/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -30,7 +30,6 @@ import org.apache.hadoop.util.Progressable;
 /****************************************************************
  * Implement the FileSystem API for the raw local filesystem.
  *
- * @author Mike Cafarella
  *****************************************************************/
 public class RawLocalFileSystem extends FileSystem {
   static final URI NAME = URI.create("file:///");

+ 0 - 1
src/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -22,7 +22,6 @@ import org.apache.hadoop.util.Progressable;
  * <p>
  * A {@link FileSystem} backed by <a href="http://aws.amazon.com/s3">Amazon S3</a>.
  * </p>
- * @author Tom White
  */
 public class S3FileSystem extends FileSystem {
 

+ 0 - 1
src/java/org/apache/hadoop/io/BytesWritable.java

@@ -27,7 +27,6 @@ import java.io.DataOutput;
 * It is resizable and distinguishes between the size of the sequence and
  * the current capacity. The hash function is the front of the md5 of the 
  * buffer. The sort order is the same as memcmp.
- * @author Doug Cutting
  */
 public class BytesWritable implements WritableComparable {
   private int size;

+ 0 - 1
src/java/org/apache/hadoop/io/DataInputBuffer.java

@@ -38,7 +38,6 @@ import java.io.*;
  * }
  * </pre>
  *  
- * @author Doug Cutting
  */
 public class DataInputBuffer extends DataInputStream {
 

+ 0 - 1
src/java/org/apache/hadoop/io/DataOutputBuffer.java

@@ -38,7 +38,6 @@ import java.io.*;
  * }
  * </pre>
  *  
- * @author Doug Cutting
  */
 public class DataOutputBuffer extends DataOutputStream {
 

+ 0 - 1
src/java/org/apache/hadoop/io/GenericWritable.java

@@ -43,7 +43,6 @@ import java.io.IOException;
  * }
  * </pre></blockquote>
  * 
- * @author Feng Jiang (Feng.a.Jiang@gmail.com)
  * @since Nov 8, 2006
  */
 public abstract class GenericWritable implements Writable {

+ 0 - 2
src/java/org/apache/hadoop/io/MD5Hash.java

@@ -25,8 +25,6 @@ import java.util.Arrays;
 import java.security.*;
 
 /** A Writable for MD5 hash values.
- *
- * @author Doug Cutting
  */
 public class MD5Hash implements WritableComparable {
   public static final int MD5_LEN = 16;

+ 0 - 1
src/java/org/apache/hadoop/io/UTF8.java

@@ -29,7 +29,6 @@ import org.apache.commons.logging.*;
  * 
  * <p>Also includes utilities for efficiently reading and writing UTF-8.
  *
- * @author Doug Cutting
  * @deprecated replaced by Text
  */
 public class UTF8 implements WritableComparable {

+ 0 - 1
src/java/org/apache/hadoop/io/VIntWritable.java

@@ -23,7 +23,6 @@ import java.io.*;
 /** A WritableComparable for integer values stored in variable-length format.
  * Such values take between one and five bytes.  Smaller values take fewer bytes.
  * 
- * @author Andrzej Bialecki
  * @see org.apache.hadoop.io.WritableUtils#readVInt(DataInput)
  */
 public class VIntWritable implements WritableComparable {
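
The variable-length format is easiest to see by round-tripping a couple of values through the WritableUtils helpers that back this class:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;
    import org.apache.hadoop.io.WritableUtils;

    public class VIntSketch {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream bytes = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(bytes);
        WritableUtils.writeVInt(out, 42);         // small value: a single byte
        WritableUtils.writeVInt(out, 1000000);    // larger value: more bytes
        out.flush();
        System.out.println("encoded length: " + bytes.size());

        DataInputStream in =
            new DataInputStream(new ByteArrayInputStream(bytes.toByteArray()));
        System.out.println(WritableUtils.readVInt(in));   // 42
        System.out.println(WritableUtils.readVInt(in));   // 1000000
      }
    }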

+ 0 - 1
src/java/org/apache/hadoop/io/VLongWritable.java

@@ -23,7 +23,6 @@ import java.io.*;
 /** A WritableComparable for longs in a variable-length format. Such values take
 *  between one and nine bytes.  Smaller values take fewer bytes.
  *  
- *  @author Andrzej Bialecki
  *  @see org.apache.hadoop.io.WritableUtils#readVLong(DataInput)
  */
 public class VLongWritable implements WritableComparable {

+ 0 - 2
src/java/org/apache/hadoop/io/VersionedWritable.java

@@ -28,8 +28,6 @@ import java.io.IOException;
  * old version of the class may still be processed by the new version.  To
  * handle this situation, {@link #readFields(DataInput)}
  * implementations should catch {@link VersionMismatchException}.
- *
- * @author Doug Cutting
  */
 public abstract class VersionedWritable implements Writable {
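
A minimal subclass shows the version handshake the javadoc describes: super.write prepends the version byte, and super.readFields throws VersionMismatchException when the stored byte disagrees, which callers should catch. Class and field names below are illustrative:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.VersionedWritable;

    public class RecordV2 extends VersionedWritable {
      private static final byte VERSION = 2;
      private int value;

      public byte getVersion() {
        return VERSION;                // written on write(), checked on readFields()
      }

      public void write(DataOutput out) throws IOException {
        super.write(out);              // writes the version byte first
        out.writeInt(value);
      }

      public void readFields(DataInput in) throws IOException {
        super.readFields(in);          // may throw VersionMismatchException
        value = in.readInt();
      }
    }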
 

+ 0 - 2
src/java/org/apache/hadoop/io/Writable.java

@@ -28,8 +28,6 @@ import java.io.IOException;
  * <p>Implementations typically implement a static <code>read(DataInput)</code>
  * method which constructs a new instance, calls {@link
  * #readFields(DataInput)}, and returns the instance.
- *
- * @author Doug Cutting
  */
 public interface Writable {
   /** Writes the fields of this object to <code>out</code>. */
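
A small, hypothetical implementation makes the contract concrete, including the static read(DataInput) factory pattern the javadoc recommends:

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;
    import org.apache.hadoop.io.Writable;

    public class PointWritable implements Writable {
      private int x;
      private int y;

      public void write(DataOutput out) throws IOException {
        out.writeInt(x);               // serialize fields in a fixed order
        out.writeInt(y);
      }

      public void readFields(DataInput in) throws IOException {
        x = in.readInt();              // read them back in the same order
        y = in.readInt();
      }

      /** The static factory pattern described above. */
      public static PointWritable read(DataInput in) throws IOException {
        PointWritable p = new PointWritable();
        p.readFields(in);
        return p;
      }
    }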

+ 0 - 2
src/java/org/apache/hadoop/io/WritableComparable.java

@@ -19,8 +19,6 @@
 package org.apache.hadoop.io;
 
 /** An interface which extends both {@link Writable} and {@link Comparable}.
- *
- * @author Doug Cutting
  */
 public interface WritableComparable extends Writable, Comparable {
 }

+ 0 - 1
src/java/org/apache/hadoop/io/WritableName.java

@@ -25,7 +25,6 @@ import org.apache.hadoop.conf.Configuration;
 
 /** Utility to permit renaming of Writable implementation classes without
 * invalidating files that contain their class name.
- * @author Doug Cutting
  */
 public class WritableName {
   private static HashMap<String, Class> NAME_TO_CLASS =
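
Usage comes down to two static calls: setName fixes the name new data is written under, and addName keeps an old name resolvable. A sketch reusing the hypothetical PointWritable from the Writable section above:

    import org.apache.hadoop.io.WritableName;

    public class RenameSketch {
      public static void register() {
        // new files record (and resolve) this name
        WritableName.setName(PointWritable.class, "demo.PointWritable");
        // old files that stored the previous class name still resolve
        WritableName.addName(PointWritable.class, "org.example.OldPointWritable");
      }
    }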

+ 0 - 1
src/java/org/apache/hadoop/io/compress/BlockCompressorStream.java

@@ -26,7 +26,6 @@ import java.io.OutputStream;
 * with 'block-based' compression algorithms, as opposed to 
  * 'stream-based' compression algorithms.
  *  
- * @author Arun C Murthy
  */
 class BlockCompressorStream extends CompressorStream {
 

+ 0 - 1
src/java/org/apache/hadoop/io/compress/BlockDecompressorStream.java

@@ -27,7 +27,6 @@ import java.io.InputStream;
 * with 'block-based' compression algorithms, as opposed to 
  * 'stream-based' compression algorithms.
  *  
- * @author Arun C Murthy
  */
 class BlockDecompressorStream extends DecompressorStream {
   private int originalBlockSize = 0;

+ 0 - 1
src/java/org/apache/hadoop/io/compress/CompressionCodec.java

@@ -24,7 +24,6 @@ import java.io.OutputStream;
 
 /**
  * This class encapsulates a streaming compression/decompression pair.
- * @author Owen O'Malley
  */
 public interface CompressionCodec {
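
A round trip through one codec shows the compression/decompression pair in action. An illustrative sketch using GzipCodec as the concrete implementation, instantiated via ReflectionUtils so it picks up the Configuration:

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.GzipCodec;
    import org.apache.hadoop.util.ReflectionUtils;

    public class CodecSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        CompressionCodec codec =
            (CompressionCodec) ReflectionUtils.newInstance(GzipCodec.class, conf);

        ByteArrayOutputStream sink = new ByteArrayOutputStream();
        OutputStream out = codec.createOutputStream(sink);   // compression side
        out.write("hello codec".getBytes("UTF-8"));
        out.close();

        InputStream in = codec.createInputStream(            // decompression side
            new ByteArrayInputStream(sink.toByteArray()));
        byte[] buf = new byte[64];
        int n = in.read(buf);
        System.out.println(new String(buf, 0, n, "UTF-8"));
      }
    }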
 

Some files were not shown because too many files changed in this diff