Explorar o código

HADOOP-9323. Fix typos in API documentation. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1449977 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas hai 12 anos
pai
achega
4840775e3d

+ 2 - 0
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -385,6 +385,8 @@ Release 2.0.4-beta - UNRELEASED
     HADOOP-8569. CMakeLists.txt: define _GNU_SOURCE and _LARGEFILE_SOURCE.
     (Colin Patrick McCabe via atm)
 
+    HADOOP-9323. Fix typos in API documentation. (suresh)
+
 Release 2.0.3-alpha - 2013-02-06 
 
   INCOMPATIBLE CHANGES

+ 1 - 19
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -21,8 +21,6 @@ package org.apache.hadoop.fs;
 import java.io.*;
 import java.util.Arrays;
 
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -32,7 +30,7 @@ import org.apache.hadoop.util.PureJavaCrc32;
 
 
 /****************************************************************
  * Abstract Checksumed FileSystem.
- * It provide a basice implementation of a Checksumed FileSystem,
+ * It provide a basic implementation of a Checksumed FileSystem,
  * which creates a checksum file for each raw file.
  * It generates & verifies checksums at the client side.
  *
@@ -118,9 +116,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
    * It verifies that data matches checksums.
    *******************************************************/
   private static class ChecksumFSInputChecker extends FSInputChecker {
-    public static final Log LOG 
-      = LogFactory.getLog(FSInputChecker.class);
-    
     private ChecksumFileSystem fs;
     private FSDataInputStream datas;
     private FSDataInputStream sums;
@@ -374,19 +369,6 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
     private FSDataOutputStream sums;
     private static final float CHKSUM_AS_FRACTION = 0.01f;
     
-    public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
-                          Path file, 
-                          boolean overwrite, 
-                          short replication,
-                          long blockSize,
-                          Configuration conf)
-      throws IOException {
-      this(fs, file, overwrite, 
-           conf.getInt(LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_KEY,
-		       LocalFileSystemConfigKeys.LOCAL_FS_STREAM_BUFFER_SIZE_DEFAULT),
-           replication, blockSize, null);
-    }
-    
     public ChecksumFSOutputSummer(ChecksumFileSystem fs, 
                           Path file, 
                           boolean overwrite,

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileContext.java

@@ -1326,7 +1326,7 @@ public final class FileContext {
    * 
    * 2. Partially qualified URIs (eg scheme but no host)
    * 
-   * fs:///A/B/file  Resolved according to the target file sytem. Eg resolving
+   * fs:///A/B/file  Resolved according to the target file system. Eg resolving
    *                 a symlink to hdfs:///A results in an exception because
    *                 HDFS URIs must be fully qualified, while a symlink to 
    *                 file:///A will not since Hadoop's local file systems 

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/FileSystem.java

@@ -1864,7 +1864,7 @@ public abstract class FileSystem extends Configured implements Closeable {
    * 
    * Some file systems like LocalFileSystem have an initial workingDir
    * that we use as the starting workingDir. For other file systems
-   * like HDFS there is no built in notion of an inital workingDir.
+   * like HDFS there is no built in notion of an initial workingDir.
    * 
    * @return if there is built in notion of workingDir then it
    * is returned; else a null is returned.

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/PositionedReadable.java

@@ -43,7 +43,7 @@ public interface PositionedReadable {
     throws IOException;
   
   /**
-   * Read number of bytes equalt to the length of the buffer, from a given
+   * Read number of bytes equal to the length of the buffer, from a given
    * position within a file. This does not
    * change the current offset of a file, and is thread-safe.
    */

+ 5 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/TrashPolicy.java

@@ -79,19 +79,17 @@ public abstract class TrashPolicy extends Configured {
 
 
   /**
    * Get an instance of the configured TrashPolicy based on the value 
-   * of the configuration paramater fs.trash.classname.
+   * of the configuration parameter fs.trash.classname.
    *
    * @param conf the configuration to be used
    * @param fs the file system to be used
    * @param home the home directory
    * @return an instance of TrashPolicy
    */
-  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home)
-      throws IOException {
-    Class<? extends TrashPolicy> trashClass = conf.getClass("fs.trash.classname",
-                                                      TrashPolicyDefault.class,
-                                                      TrashPolicy.class);
-    TrashPolicy trash = (TrashPolicy) ReflectionUtils.newInstance(trashClass, conf);
+  public static TrashPolicy getInstance(Configuration conf, FileSystem fs, Path home) {
+    Class<? extends TrashPolicy> trashClass = conf.getClass(
+        "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
+    TrashPolicy trash = ReflectionUtils.newInstance(trashClass, conf);
     trash.initialize(conf, fs, home); // initialize TrashPolicy
     return trash;
   }

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/BytesWritable.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 
 
 /** 
  * A byte sequence that is usable as a key or value.
- * It is resizable and distinguishes between the size of the seqeunce and
+ * It is resizable and distinguishes between the size of the sequence and
  * the current capacity. The hash function is the front of the md5 of the 
  * buffer. The sort order is the same as memcmp.
  */

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -128,7 +128,7 @@ public class Text extends BinaryComparable
   /**
    * Returns the Unicode Scalar Value (32-bit integer value)
    * for the character at <code>position</code>. Note that this
-   * method avoids using the converter or doing String instatiation
+   * method avoids using the converter or doing String instantiation
    * @return the Unicode scalar value at position or -1
    *          if the position is invalid or points to a
    *          trailing byte
@@ -527,7 +527,7 @@ public class Text extends BinaryComparable
     int length = 0;
     int state = LEAD_BYTE;
     while (count < start+len) {
-      int aByte = ((int) utf8[count] & 0xFF);
+      int aByte = utf8[count] & 0xFF;
 
       switch (state) {
       case LEAD_BYTE:

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/Buffer.java

@@ -192,7 +192,7 @@ public class Buffer implements Comparable, Cloneable {
     int hash = 1;
     byte[] b = this.get();
     for (int i = 0; i < count; i++)
-      hash = (31 * hash) + (int)b[i];
+      hash = (31 * hash) + b[i];
     return hash;
   }
   

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/record/RecordOutput.java

@@ -26,7 +26,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * Interface that alll the serializers have to implement.
+ * Interface that all the serializers have to implement.
  * 
  * @deprecated Replaced by <a href="http://hadoop.apache.org/avro/">Avro</a>.
  */