
HADOOP-4704. Fix javadoc typos "the the". (szetszwo)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@720930 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 16 years ago
parent
commit
9be3b247b9

+ 2 - 0
CHANGES.txt

@@ -219,6 +219,8 @@ Trunk (unreleased changes)
     name "value", so the counter values can not be seen. (Jason Attributor
     name "value", so the counter values can not be seen. (Jason Attributor
     and Brian Bockelman via stack)
     and Brian Bockelman via stack)
 
 
+    HADOOP-4704. Fix javadoc typos "the the". (szetszwo)
+
 Release 0.19.1 - Unreleased
 Release 0.19.1 - Unreleased
 
 
   BUG FIXES
   BUG FIXES

+ 3 - 6
src/contrib/data_join/src/java/org/apache/hadoop/contrib/utils/join/ResetableIterator.java

@@ -22,12 +22,9 @@ import java.io.IOException;
 import java.util.Iterator;
 
 /**
- * This interface defines an iterator interface that will help the reducer class
- * for re-grouping the values in the values iterator of the reduce method
- * according the their source tags. Once the value re-grouped, the reducer can
- * perform the cross product over the values in different groups.
- * 
- * 
+ * This defines an iterator interface that will help the reducer class
+ * re-group its input by source tags. Once the values are re-grouped,
+ * the reducer will receive the cross product of values from different groups.
  */
 public interface ResetableIterator extends Iterator {
   public void reset();
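
A minimal sketch of how an implementation might look, assuming the interface declares only reset() on top of the raw java.util.Iterator shown in this hunk; the class name and the add() helper are hypothetical:

import java.util.ArrayList;
import java.util.Iterator;
import org.apache.hadoop.contrib.utils.join.ResetableIterator;

// Hypothetical list-backed implementation: buffer the values for one
// source tag, then rewind as many times as the cross product requires.
public class ListBackedResetableIterator implements ResetableIterator {
  private final ArrayList<Object> values = new ArrayList<Object>();
  private Iterator<Object> iter = values.iterator();

  public void add(Object value) {
    values.add(value);        // collect a value during the first pass
  }

  public void reset() {
    iter = values.iterator(); // rewind for another pass over the group
  }

  public boolean hasNext() { return iter.hasNext(); }
  public Object next()     { return iter.next(); }
  public void remove()     { throw new UnsupportedOperationException(); }
}

A reducer would fill one such iterator per source tag, then repeatedly reset() the inner ones while walking the outer one to form the cross product.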

+ 2 - 2
src/core/org/apache/hadoop/conf/Configuration.java

@@ -1153,9 +1153,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   }
 
   /** 
-   * Set the quiteness-mode. 
+   * Set the quietness-mode. 
    * 
-   * In the the quite-mode error and informational messages might not be logged.
+   * In the quiet-mode, error and informational messages might not be logged.
    * 
    * @param quietmode <code>true</code> to set quiet-mode on, <code>false</code>
    *              to turn it off.
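
For context, quiet-mode is just a flag consulted while configuration resources are parsed; a minimal sketch of toggling it (the property key in get() is only illustrative):

import org.apache.hadoop.conf.Configuration;

public class QuietModeExample {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    conf.setQuietMode(false);  // turn quiet-mode off so parsing problems get logged
    // The first get() forces the default resources to be loaded and parsed.
    System.out.println("io.file.buffer.size = " + conf.get("io.file.buffer.size", "4096"));
  }
}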

+ 2 - 2
src/core/org/apache/hadoop/fs/HarFileSystem.java

@@ -518,7 +518,7 @@ public class HarFileSystem extends FilterFileSystem {
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
     FileStatus archiveStatus = fs.getFileStatus(archiveIndex);
-    // get the fs DataInputStream for the the underlying file
+    // get the fs DataInputStream for the underlying file
     // look up the index.
     Path p = makeQualified(f);
     Path harPath = getPathInHar(p);
@@ -547,7 +547,7 @@ public class HarFileSystem extends FilterFileSystem {
    */
   @Override
   public FSDataInputStream open(Path f, int bufferSize) throws IOException {
-    // get the fs DataInputStream for the the underlying file
+    // get the fs DataInputStream for the underlying file
     // look up the index.
     Path p = makeQualified(f);
     Path harPath = getPathInHar(p);

+ 1 - 1
src/core/org/apache/hadoop/util/NativeCodeLoader.java

@@ -25,7 +25,7 @@ import org.apache.hadoop.conf.Configuration;
 /**
  * A helper to load the native hadoop code i.e. libhadoop.so.
  * This handles the fallback to either the bundled libhadoop-Linux-i386-32.so
- * or the the default java implementations where appropriate.
+ * or the default java implementations where appropriate.
  *  
  */
 public class NativeCodeLoader {
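
The fallback this javadoc describes is typically probed through the class's static check; a minimal sketch:

import org.apache.hadoop.util.NativeCodeLoader;

public class NativeCheck {
  public static void main(String[] args) {
    // Loading is attempted once in a static initializer; this only reports
    // whether libhadoop was found on java.library.path.
    if (NativeCodeLoader.isNativeCodeLoaded()) {
      System.out.println("native hadoop library loaded");
    } else {
      System.out.println("falling back to the default java implementations");
    }
  }
}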

+ 1 - 1
src/examples/org/apache/hadoop/examples/dancing/DancingLinks.java

@@ -275,7 +275,7 @@ public class DancingLinks<ColumnName> {
   }
   
   /**
-   * Find a solution the the problem.
+   * Find a solution to the problem.
    * @param partial a temporary datastructure to keep the current partial 
    *                answer in
    * @param output the acceptor for the results that are found

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java

@@ -26,7 +26,7 @@ import java.io.IOException;
  * on number of files and directories) or a diskspace quota (limit on space 
  * taken by all the file under the directory tree). <br> <br>
  * 
- * The message for the exception specifies the the directory where the quota
+ * The message for the exception specifies the directory where the quota
  * was violated and actual quotas.
  */
 public final class QuotaExceededException extends IOException {

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -2575,7 +2575,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean {
                                  getReplication(timedOutItems[i]));
         }
       }
-      /* If we know the the target datanodes where the replication timedout,
+      /* If we know the target datanodes where the replication timedout,
        * we could invoke decBlocksScheduled() on it. Its ok for now.
        */
     }

+ 2 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/ReplicationTargetChooser.java

@@ -483,7 +483,7 @@ class ReplicationTargetChooser {
 
   /**
    * Verify that the block is replicated on at least 2 different racks
-   * if there is more than one rack in the the system.
+   * if there is more than one rack in the system.
    * 
    * @param lBlk block with locations
    * @param cluster 
@@ -499,7 +499,7 @@ class ReplicationTargetChooser {
 
   /**
    * Verify that the block is replicated on at least minRacks different racks
-   * if there is more than minRacks rack in the the system.
+   * if there is more than minRacks rack in the system.
    * 
    * @param lBlk block with locations
    * @param minRacks number of racks the block should be replicated to

+ 1 - 1
src/mapred/org/apache/hadoop/mapred/JobQueueJobInProgressListener.java

@@ -91,7 +91,7 @@ class JobQueueJobInProgressListener extends JobInProgressListener {
   }
 
   /**
-   * Returns a synchronized view of the the job queue.
+   * Returns a synchronized view of the job queue.
    */
   public Collection<JobInProgress> getJobQueue() {
     return jobQueue.values();

+ 1 - 1
src/mapred/org/apache/hadoop/mapred/jobcontrol/JobControl.java

@@ -148,7 +148,7 @@ public class JobControl implements Runnable{
 
   /**
    * Add a new job.
-   * @param aJob the the new job
+   * @param aJob the new job
    */
   synchronized public String addJob(Job aJob) {
     String id = this.getNextJobID();
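
A minimal sketch of driving addJob() from a job driver, assuming the 0.19-era jobcontrol API (JobConf setup elided):

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.jobcontrol.Job;
import org.apache.hadoop.mapred.jobcontrol.JobControl;

public class JobControlDriver {
  public static void runChain(JobConf confA, JobConf confB) throws Exception {
    JobControl control = new JobControl("example-group");
    Job first = new Job(confA);
    Job second = new Job(confB);
    second.addDependingJob(first);     // second waits until first succeeds
    String id = control.addJob(first); // addJob hands back the generated job id
    control.addJob(second);
    new Thread(control).start();       // JobControl implements Runnable
    while (!control.allFinished()) {
      Thread.sleep(1000);              // poll until all jobs succeed or fail
    }
    control.stop();
    System.out.println("first job was assigned id " + id);
  }
}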

+ 1 - 1
src/mapred/org/apache/hadoop/mapred/lib/MultipleOutputs.java

@@ -127,7 +127,7 @@ public class MultipleOutputs {
   private static final String COUNTERS_ENABLED = "mo.counters";
 
   /**
-   * Counters group used by the the counters of MultipleOutputs.
+   * Counters group used by the counters of MultipleOutputs.
    */
   private static final String COUNTERS_GROUP = MultipleOutputs.class.getName();
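
These counters are opt-in per job; a minimal sketch of switching them on, assuming the setCountersEnabled() helper that writes the "mo.counters" key shown above:

import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.lib.MultipleOutputs;

public class EnableMOCounters {
  public static void configure(JobConf conf) {
    // Sets "mo.counters" to true; each named output then gets a counter
    // in the COUNTERS_GROUP (the MultipleOutputs class name).
    MultipleOutputs.setCountersEnabled(conf, true);
  }
}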
 

+ 1 - 1
src/mapred/org/apache/hadoop/mapred/pipes/BinaryProtocol.java

@@ -205,7 +205,7 @@ class BinaryProtocol<K1 extends WritableComparable, V1 extends Writable,
 
   /**
    * Create a proxy object that will speak the binary protocol on a socket.
-   * Upward messages are passed on the the specified handler and downward
+   * Upward messages are passed on the specified handler and downward
    * downward messages are public methods on this object.
    * @param sock The socket to communicate on.
    * @param handler The handler for the received messages.

+ 1 - 1
src/test/org/apache/hadoop/fs/loadGenerator/LoadGenerator.java

@@ -38,7 +38,7 @@ import org.apache.hadoop.util.ToolRunner;
  * different client loads.
  * It allows the user to generate different mixes of read, write,
  * and list requests by specifying the probabilities of read and
- * write. The user controls the the intensity of the load by
+ * write. The user controls the intensity of the load by
  * adjusting parameters for the number of worker threads and the delay
  * between operations. While load generators are running, the user
  * can profile and monitor the running of the NameNode. When a load

+ 2 - 2
src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -771,7 +771,7 @@ public class MiniDFSCluster {
   
   
   /**
-   * This method is valid only if the the data nodes have simulated data
+   * This method is valid only if the data nodes have simulated data
    * @param dataNodeIndex - data node i which to inject - the index is same as for getDataNodes()
    * @param blocksToInject - the blocks
    * @throws IOException
@@ -793,7 +793,7 @@ public class MiniDFSCluster {
   }
   
   /**
-   * This method is valid only if the the data nodes have simulated data
+   * This method is valid only if the data nodes have simulated data
    * @param blocksToInject - blocksToInject[] is indexed in the same order as the list 
    *             of datanodes returned by getDataNodes()
    * @throws IOException

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -496,7 +496,7 @@ public class TestDFSPermission extends TestCase {
       }
     }
 
-    /* Log the the permissions and required permissions */
+    /** Log the permissions and required permissions */
     protected void logPermissions() {
       LOG.info("required ancestor permission:"
           + Integer.toOctalString(requiredAncestorPermission));

+ 1 - 1
src/test/org/apache/hadoop/mapred/MRCaching.java

@@ -205,7 +205,7 @@ public class MRCaching {
     DistributedCache.addCacheArchive(uri6, conf);
     RunningJob job = JobClient.runJob(conf);
     int count = 0;
-    // after the job ran check to see if the the input from the localized cache
+    // after the job ran check to see if the input from the localized cache
     // match the real string. check if there are 3 instances or not.
     Path result = new Path(TEST_ROOT_DIR + "/test.txt");
     {