
Merge -r 953880:953881 from trunk to branch-0.21. Fixes: HADOOP-6668

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.21@953884 13f79535-47bb-0310-9956-ffa450edef68
Thomas White, 15 years ago
commit 4155486de4
100 files changed, 493 insertions(+), 9 deletions(-)
  1.   +3  -0  CHANGES.txt
  2.   +5  -0  src/java/org/apache/hadoop/HadoopIllegalArgumentException.java
  3.   +5  -0  src/java/org/apache/hadoop/HadoopVersionAnnotation.java
  4.   +2  -0  src/java/org/apache/hadoop/classification/InterfaceAudience.java
  5.   +2  -0  src/java/org/apache/hadoop/classification/InterfaceStability.java
  6.   +4  -0  src/java/org/apache/hadoop/conf/ConfServlet.java
  7.   +5  -0  src/java/org/apache/hadoop/conf/Configurable.java
  8.   +4  -0  src/java/org/apache/hadoop/conf/Configuration.java
  9.   +5  -0  src/java/org/apache/hadoop/conf/Configured.java
  10.  +4  -0  src/java/org/apache/hadoop/fs/AvroFSInput.java
  11.  +4  -0  src/java/org/apache/hadoop/fs/BlockLocation.java
  12.  +5  -1  src/java/org/apache/hadoop/fs/BufferedFSInputStream.java
  13.  +5  -0  src/java/org/apache/hadoop/fs/ChecksumException.java
  14.  +4  -0  src/java/org/apache/hadoop/fs/ChecksumFileSystem.java
  15.  +5  -1  src/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  16.  +6  -0  src/java/org/apache/hadoop/fs/ContentSummary.java
  17.  +5  -0  src/java/org/apache/hadoop/fs/CreateFlag.java
  18.  +4  -0  src/java/org/apache/hadoop/fs/DF.java
  19.  +4  -0  src/java/org/apache/hadoop/fs/DU.java
  20.  +5  -0  src/java/org/apache/hadoop/fs/FSDataInputStream.java
  21.  +5  -0  src/java/org/apache/hadoop/fs/FSDataOutputStream.java
  22.  +5  -0  src/java/org/apache/hadoop/fs/FSError.java
  23.  +4  -1  src/java/org/apache/hadoop/fs/FSInputChecker.java
  24.  +5  -0  src/java/org/apache/hadoop/fs/FSInputStream.java
  25.  +5  -1  src/java/org/apache/hadoop/fs/FSOutputSummer.java
  26.  +5  -0  src/java/org/apache/hadoop/fs/FileAlreadyExistsException.java
  27.  +4  -0  src/java/org/apache/hadoop/fs/FileChecksum.java
  28.  +4  -0  src/java/org/apache/hadoop/fs/FileStatus.java
  29.  +4  -0  src/java/org/apache/hadoop/fs/FileSystem.java
  30.  +5  -0  src/java/org/apache/hadoop/fs/FileUtil.java
  31.  +4  -0  src/java/org/apache/hadoop/fs/FilterFileSystem.java
  32.  +5  -0  src/java/org/apache/hadoop/fs/FsConstants.java
  33.  +6  -0  src/java/org/apache/hadoop/fs/FsServerDefaults.java
  34.  +2  -0  src/java/org/apache/hadoop/fs/FsShell.java
  35.  +4  -0  src/java/org/apache/hadoop/fs/FsShellPermissions.java
  36.  +4  -0  src/java/org/apache/hadoop/fs/FsStatus.java
  37.  +4  -0  src/java/org/apache/hadoop/fs/FsUrlConnection.java
  38.  +4  -0  src/java/org/apache/hadoop/fs/FsUrlStreamHandler.java
  39.  +4  -0  src/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java
  40.  +5  -0  src/java/org/apache/hadoop/fs/GlobExpander.java
  41.  +4  -0  src/java/org/apache/hadoop/fs/InvalidPathException.java
  42.  +4  -0  src/java/org/apache/hadoop/fs/LocalDirAllocator.java
  43.  +5  -0  src/java/org/apache/hadoop/fs/LocalFileSystem.java
  44.  +5  -1  src/java/org/apache/hadoop/fs/LocalFileSystemConfigKeys.java
  45.  +4  -0  src/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java
  46.  +4  -0  src/java/org/apache/hadoop/fs/Options.java
  47.  +5  -0  src/java/org/apache/hadoop/fs/ParentNotDirectoryException.java
  48.  +5  -0  src/java/org/apache/hadoop/fs/Path.java
  49.  +5  -0  src/java/org/apache/hadoop/fs/PathFilter.java
  50.  +5  -0  src/java/org/apache/hadoop/fs/PositionedReadable.java
  51.  +4  -0  src/java/org/apache/hadoop/fs/RawLocalFileSystem.java
  52.  +6  -0  src/java/org/apache/hadoop/fs/Seekable.java
  53.  +5  -0  src/java/org/apache/hadoop/fs/Syncable.java
  54.  +4  -0  src/java/org/apache/hadoop/fs/Trash.java
  55.  +2  -0  src/java/org/apache/hadoop/fs/UnresolvedLinkException.java
  56.  +5  -0  src/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java
  57.  +5  -0  src/java/org/apache/hadoop/fs/ftp/FTPException.java
  58.  +4  -0  src/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java
  59.  +4  -0  src/java/org/apache/hadoop/fs/ftp/FTPInputStream.java
  60.  +4  -1  src/java/org/apache/hadoop/fs/kfs/KFSConfigKeys.java
  61.  +4  -0  src/java/org/apache/hadoop/fs/kfs/KFSImpl.java
  62.  +4  -0  src/java/org/apache/hadoop/fs/kfs/KFSInputStream.java
  63.  +4  -0  src/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java
  64.  +4  -1  src/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java
  65.  +22 -0  src/java/org/apache/hadoop/fs/local/package-info.java
  66.  +5  -0  src/java/org/apache/hadoop/fs/permission/AccessControlException.java
  67.  +4  -0  src/java/org/apache/hadoop/fs/permission/ChmodParser.java
  68.  +5  -0  src/java/org/apache/hadoop/fs/permission/FsAction.java
  69.  +4  -0  src/java/org/apache/hadoop/fs/permission/FsPermission.java
  70.  +5  -0  src/java/org/apache/hadoop/fs/permission/PermissionParser.java
  71.  +4  -0  src/java/org/apache/hadoop/fs/permission/PermissionStatus.java
  72.  +5  -0  src/java/org/apache/hadoop/fs/permission/UmaskParser.java
  73.  +5  -0  src/java/org/apache/hadoop/fs/s3/Block.java
  74.  +4  -0  src/java/org/apache/hadoop/fs/s3/FileSystemStore.java
  75.  +5  -0  src/java/org/apache/hadoop/fs/s3/INode.java
  76.  +4  -0  src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
  77.  +4  -0  src/java/org/apache/hadoop/fs/s3/MigrationTool.java
  78.  +4  -0  src/java/org/apache/hadoop/fs/s3/S3Credentials.java
  79.  +5  -0  src/java/org/apache/hadoop/fs/s3/S3Exception.java
  80.  +4  -0  src/java/org/apache/hadoop/fs/s3/S3FileSystem.java
  81.  +4  -1  src/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java
  82.  +5  -0  src/java/org/apache/hadoop/fs/s3/S3FileSystemException.java
  83.  +4  -0  src/java/org/apache/hadoop/fs/s3/S3InputStream.java
  84.  +4  -0  src/java/org/apache/hadoop/fs/s3/S3OutputStream.java
  85.  +5  -0  src/java/org/apache/hadoop/fs/s3/VersionMismatchException.java
  86.  +5  -0  src/java/org/apache/hadoop/fs/s3native/FileMetadata.java
  87.  +4  -0  src/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
  88.  +4  -0  src/java/org/apache/hadoop/fs/s3native/NativeFileSystemStore.java
  89.  +4  -0  src/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java
  90.  +5  -0  src/java/org/apache/hadoop/fs/s3native/PartialListing.java
  91.  +4  -1  src/java/org/apache/hadoop/fs/s3native/S3NativeFileSystemConfigKeys.java
  92.  +23 -0  src/java/org/apache/hadoop/fs/shell/package-info.java
  93.  +22 -0  src/java/org/apache/hadoop/http/package-info.java
  94.  +4  -0  src/java/org/apache/hadoop/io/AbstractMapWritable.java
  95.  +4  -0  src/java/org/apache/hadoop/io/ArrayFile.java
  96.  +5  -0  src/java/org/apache/hadoop/io/ArrayWritable.java
  97.  +5  -0  src/java/org/apache/hadoop/io/BinaryComparable.java
  98.  +4  -0  src/java/org/apache/hadoop/io/BloomMapFile.java
  99.  +5  -0  src/java/org/apache/hadoop/io/BooleanWritable.java
  100. +5  -0  src/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java

+ 3 - 0
CHANGES.txt

@@ -864,6 +864,9 @@ Release 0.21.0 - Unreleased
     HADOOP-6813. Add a new newInstance method in FileSystem that takes 
     a "user" as argument (ddas via boryas)
 
+    HADOOP-6668.  Apply audience and stability annotations to classes in
+    common.  (tomwhite)
+
   OPTIMIZATIONS
 
     HADOOP-5595. NameNode does not need to run a replicator to choose a
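
For readers skimming the diffs below: the annotations this patch applies are ordinary Java annotations from org.apache.hadoop.classification, added above each class declaration. A minimal sketch of the resulting style, using a hypothetical class (ExampleClient is illustrative only, not part of this commit):

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;

    /** A hypothetical user-facing class, annotated in the style this patch applies. */
    @InterfaceAudience.Public     // intended for use by any project or application
    @InterfaceStability.Stable    // retains compatibility across minor release boundaries
    public class ExampleClient {
    }

The annotations carry no runtime behavior; they document, per class, who may depend on it and how much it may change.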

+ 5 - 0
src/java/org/apache/hadoop/HadoopIllegalArgumentException.java

@@ -17,11 +17,16 @@
  */
 package org.apache.hadoop;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Indicates that a method has been passed illegal or invalid argument. This
  * exception is thrown instead of IllegalArgumentException to differentiate the
  * exception thrown in Hadoop implementation from the one thrown in JDK.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class HadoopIllegalArgumentException extends IllegalArgumentException {
   private static final long serialVersionUID = 1L;
   

+ 5 - 0
src/java/org/apache/hadoop/HadoopVersionAnnotation.java

@@ -19,11 +19,16 @@ package org.apache.hadoop;
 
 import java.lang.annotation.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * A package attribute that captures the version of Hadoop that was compiled.
  */
 @Retention(RetentionPolicy.RUNTIME)
 @Target(ElementType.PACKAGE)
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
 public @interface HadoopVersionAnnotation {
  
   /**

+ 2 - 0
src/java/org/apache/hadoop/classification/InterfaceAudience.java

@@ -22,6 +22,8 @@ import java.lang.annotation.Documented;
 /**
  * Annotation to inform users of a package, class or method's intended audience.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class InterfaceAudience {
   /**
    * Intended for use by any project or application.

+ 2 - 0
src/java/org/apache/hadoop/classification/InterfaceStability.java

@@ -23,6 +23,8 @@ import java.lang.annotation.Documented;
  * Annotation to inform users of how much to rely on a particular package,
  * class or method not changing over time.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class InterfaceStability {
   /**
    * Can evolve while retaining compatibility for minor release boundaries.; 

+ 4 - 0
src/java/org/apache/hadoop/conf/ConfServlet.java

@@ -26,11 +26,15 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.http.HttpServer;
 
 /**
  * A servlet to print out the running configuration data.
  */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
 public class ConfServlet extends HttpServlet {
   private static final long serialVersionUID = 1L;
 

+ 5 - 0
src/java/org/apache/hadoop/conf/Configurable.java

@@ -18,7 +18,12 @@
 
 package org.apache.hadoop.conf;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Something that may be configured with a {@link Configuration}. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public interface Configurable {
 
   /** Set the configuration to be used by this object. */

+ 4 - 0
src/java/org/apache/hadoop/conf/Configuration.java

@@ -61,6 +61,8 @@ import javax.xml.transform.stream.StreamResult;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
@@ -144,6 +146,8 @@ import org.xml.sax.SAXException;
  * <tt>${<i>user.name</i>}</tt> would then ordinarily be resolved to the value
  * of the System property with that name.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class Configuration implements Iterable<Map.Entry<String,String>>,
                                       Writable {
   private static final Log LOG =

+ 5 - 0
src/java/org/apache/hadoop/conf/Configured.java

@@ -18,7 +18,12 @@
 
 package org.apache.hadoop.conf;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Base class for things that may be configured with a {@link Configuration}. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class Configured implements Configurable {
 
   private Configuration conf;

+ 4 - 0
src/java/org/apache/hadoop/fs/AvroFSInput.java

@@ -22,8 +22,12 @@ import java.io.Closeable;
 import java.io.IOException;
 
 import org.apache.avro.file.SeekableInput;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 
 /** Adapts an {@link FSDataInputStream} to Avro's SeekableInput interface. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class AvroFSInput implements Closeable, SeekableInput {
   private final FSDataInputStream stream;
   private final long len;

+ 4 - 0
src/java/org/apache/hadoop/fs/BlockLocation.java

@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -31,6 +33,8 @@ import org.apache.hadoop.io.WritableFactory;
  * of block. 
  * 
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class BlockLocation implements Writable {
 
   static {               // register a ctor

+ 5 - 1
src/java/org/apache/hadoop/fs/BufferedFSInputStream.java

@@ -20,12 +20,16 @@ package org.apache.hadoop.fs;
 import java.io.BufferedInputStream;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 
 /**
  * A class optimizes reading from FSInputStream by bufferring
  */
 
-
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class BufferedFSInputStream extends BufferedInputStream
 implements Seekable, PositionedReadable {
   /**

+ 5 - 0
src/java/org/apache/hadoop/fs/ChecksumException.java

@@ -20,7 +20,12 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Thrown for checksum errors. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class ChecksumException extends IOException {
   private static final long serialVersionUID = 1L;
   private long pos;

+ 4 - 0
src/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -25,6 +25,8 @@ import java.util.zip.CRC32;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
@@ -38,6 +40,8 @@ import org.apache.hadoop.util.StringUtils;
  * It generates & verifies checksums at the client side.
  *
  *****************************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public abstract class ChecksumFileSystem extends FilterFileSystem {
   private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};
   private int bytesPerChecksum = 512;

+ 5 - 1
src/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** 
  * This class contains constants for configuration keys used
  * in the common code.
  *
  */
-
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class CommonConfigurationKeys {
   
   // The Keys

+ 6 - 0
src/java/org/apache/hadoop/fs/ContentSummary.java

@@ -21,9 +21,13 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 
 /** Store the summary of a content (a directory or a file). */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class ContentSummary implements Writable{
   private long length;
   private long fileCount;
@@ -72,6 +76,7 @@ public class ContentSummary implements Writable{
   public long getSpaceQuota() {return spaceQuota;}
   
   /** {@inheritDoc} */
+  @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
     out.writeLong(length);
     out.writeLong(fileCount);
@@ -82,6 +87,7 @@ public class ContentSummary implements Writable{
   }
 
   /** {@inheritDoc} */
+  @InterfaceAudience.Private
   public void readFields(DataInput in) throws IOException {
     this.length = in.readLong();
     this.fileCount = in.readLong();
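
Note the pattern in this hunk, which recurs in FsServerDefaults, Path, and Seekable below: the class is annotated Public while individual serialization methods are narrowed to @InterfaceAudience.Private at the member level. A minimal sketch of the same pattern, assuming Hadoop 0.21 on the classpath (ExampleSummary is hypothetical):

    import java.io.DataInput;
    import java.io.DataOutput;
    import java.io.IOException;

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;
    import org.apache.hadoop.io.Writable;

    /** Hypothetical class mirroring the ContentSummary pattern above. */
    @InterfaceAudience.Public
    @InterfaceStability.Evolving
    public class ExampleSummary implements Writable {
      private long length;

      /** Part of the supported public API surface. */
      public long getLength() { return length; }

      /** Writable plumbing: audience narrowed at the member level. */
      @InterfaceAudience.Private
      public void write(DataOutput out) throws IOException {
        out.writeLong(length);
      }

      @InterfaceAudience.Private
      public void readFields(DataInput in) throws IOException {
        this.length = in.readLong();
      }
    }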

+ 5 - 0
src/java/org/apache/hadoop/fs/CreateFlag.java

@@ -17,6 +17,9 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /****************************************************************
  *CreateFlag specifies the file create semantic. Users can combine flags like:<br>
  *<code>
@@ -36,6 +39,8 @@ package org.apache.hadoop.fs;
  * <li> append the file if it already exists.
  * </ol>
  *****************************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public enum CreateFlag {
 
   /**

+ 4 - 0
src/java/org/apache/hadoop/fs/DF.java

@@ -24,6 +24,8 @@ import java.io.BufferedReader;
 import java.util.EnumSet;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.util.Shell;
@@ -31,6 +33,8 @@ import org.apache.hadoop.util.Shell;
 /** Filesystem disk space usage statistics.
  * Uses the unix 'df' program to get mount points, and java.io.File for
  * space utilization. Tested on Linux, FreeBSD, Cygwin. */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
 public class DF extends Shell {
 
   /** Default DF refresh interval. */

+ 4 - 0
src/java/org/apache/hadoop/fs/DU.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.util.Shell;
 
@@ -26,6 +28,8 @@ import java.io.IOException;
 import java.util.concurrent.atomic.AtomicLong;
 
 /** Filesystem disk space usage statistics.  Uses the unix 'du' program*/
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
 public class DU extends Shell {
   private String  dirPath;
 

+ 5 - 0
src/java/org/apache/hadoop/fs/FSDataInputStream.java

@@ -19,8 +19,13 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Utility that wraps a {@link FSInputStream} in a {@link DataInputStream}
  * and buffers input through a {@link BufferedInputStream}. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FSDataInputStream extends DataInputStream
     implements Seekable, PositionedReadable {
 

+ 5 - 0
src/java/org/apache/hadoop/fs/FSDataOutputStream.java

@@ -19,9 +19,14 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Utility that wraps a {@link OutputStream} in a {@link DataOutputStream},
  * buffers output through a {@link BufferedOutputStream} and creates a checksum
  * file. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FSDataOutputStream extends DataOutputStream implements Syncable {
   private OutputStream wrappedStream;
 

+ 5 - 0
src/java/org/apache/hadoop/fs/FSError.java

@@ -18,8 +18,13 @@
 
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Thrown for unexpected filesystem errors, presumed to reflect disk errors
  * in the native filesystem. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FSError extends Error {
   private static final long serialVersionUID = 1L;
 

+ 4 - 1
src/java/org/apache/hadoop/fs/FSInputChecker.java

@@ -23,6 +23,8 @@ import java.util.zip.Checksum;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.StringUtils;
 import java.nio.ByteBuffer;
 import java.nio.IntBuffer;
@@ -31,7 +33,8 @@ import java.nio.IntBuffer;
  * This is a generic input stream for verifying checksums for
  * data before it is read by a user.
  */
-
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
 abstract public class FSInputChecker extends FSInputStream {
   public static final Log LOG 
   = LogFactory.getLog(FSInputChecker.class);

+ 5 - 0
src/java/org/apache/hadoop/fs/FSInputStream.java

@@ -19,11 +19,16 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /****************************************************************
  * FSInputStream is a generic old InputStream with a little bit
  * of RAF-style seek ability.
  *
  *****************************************************************/
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
 public abstract class FSInputStream extends InputStream
     implements Seekable, PositionedReadable {
   /**

+ 5 - 1
src/java/org/apache/hadoop/fs/FSOutputSummer.java

@@ -22,11 +22,15 @@ import java.io.IOException;
 import java.io.OutputStream;
 import java.util.zip.Checksum;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * This is a generic output stream for generating checksums for
  * data before it is written to the underlying stream
  */
-
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
 abstract public class FSOutputSummer extends OutputStream {
   // data checksum
   private Checksum sum;

+ 5 - 0
src/java/org/apache/hadoop/fs/FileAlreadyExistsException.java

@@ -21,10 +21,15 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Used when target file already exists for any operation and 
  * is not configured to be overwritten.  
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FileAlreadyExistsException
     extends IOException {
 

+ 4 - 0
src/java/org/apache/hadoop/fs/FileChecksum.java

@@ -19,9 +19,13 @@ package org.apache.hadoop.fs;
 
 import java.util.Arrays;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 
 /** An abstract class representing file checksums for files. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public abstract class FileChecksum implements Writable {
   /** The checksum algorithm name */ 
   public abstract String getAlgorithmName();

+ 4 - 0
src/java/org/apache/hadoop/fs/FileStatus.java

@@ -21,12 +21,16 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 
 /** Interface that represents the client side information for a file.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FileStatus implements Writable, Comparable {
 
   private Path path;

+ 4 - 0
src/java/org/apache/hadoop/fs/FileSystem.java

@@ -38,6 +38,8 @@ import java.util.regex.Pattern;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Options.CreateOpts;
@@ -67,6 +69,8 @@ import org.apache.hadoop.util.ReflectionUtils;
  * The local implementation is {@link LocalFileSystem} and distributed
  * implementation is DistributedFileSystem.
  *****************************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public abstract class FileSystem extends Configured implements Closeable {
   public static final String FS_DEFAULT_NAME_KEY = 
                    CommonConfigurationKeys.FS_DEFAULT_NAME_KEY;

+ 5 - 0
src/java/org/apache/hadoop/fs/FileUtil.java

@@ -22,6 +22,9 @@ import java.io.*;
 import java.util.Enumeration;
 import java.util.zip.ZipEntry;
 import java.util.zip.ZipFile;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.util.StringUtils;
@@ -34,6 +37,8 @@ import org.apache.commons.logging.LogFactory;
 /**
  * A collection of file-processing util methods
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class FileUtil {
 
   private static final Log LOG = LogFactory.getLog(FileUtil.class);

+ 4 - 0
src/java/org/apache/hadoop/fs/FilterFileSystem.java

@@ -22,6 +22,8 @@ import java.io.*;
 import java.net.URI;
 import java.util.EnumSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
@@ -41,6 +43,8 @@ import org.apache.hadoop.util.Progressable;
  * and fields.
  *
  *****************************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FilterFileSystem extends FileSystem {
   
   protected FileSystem fs;

+ 5 - 0
src/java/org/apache/hadoop/fs/FsConstants.java

@@ -19,9 +19,14 @@ package org.apache.hadoop.fs;
 
 import java.net.URI;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * FileSystem related constants.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public interface FsConstants {
   // URI for local filesystem
   public static final URI LOCAL_FS_URI = URI.create("file:///");

+ 6 - 0
src/java/org/apache/hadoop/fs/FsServerDefaults.java

@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -29,6 +31,8 @@ import org.apache.hadoop.io.WritableFactory;
  * Provides server default configuration values to clients.
  * 
  ****************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public class FsServerDefaults implements Writable {
 
   static { // register a ctor
@@ -80,6 +84,7 @@ public class FsServerDefaults implements Writable {
   // /////////////////////////////////////////
   // Writable
   // /////////////////////////////////////////
+  @InterfaceAudience.Private
   public void write(DataOutput out) throws IOException {
     out.writeLong(blockSize);
     out.writeInt(bytesPerChecksum);
@@ -88,6 +93,7 @@ public class FsServerDefaults implements Writable {
     out.writeInt(fileBufferSize);
   }
 
+  @InterfaceAudience.Private
   public void readFields(DataInput in) throws IOException {
     blockSize = in.readLong();
     bytesPerChecksum = in.readInt();

+ 2 - 0
src/java/org/apache/hadoop/fs/FsShell.java

@@ -26,6 +26,7 @@ import java.text.SimpleDateFormat;
 import java.util.*;
 import java.util.zip.GZIPInputStream;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.shell.CommandFormat;
@@ -44,6 +45,7 @@ import org.apache.hadoop.util.ToolRunner;
 import org.apache.hadoop.util.StringUtils;
 
 /** Provide command line access to a FileSystem. */
+@InterfaceAudience.Private
 public class FsShell extends Configured implements Tool {
 
   protected FileSystem fs;

+ 4 - 0
src/java/org/apache/hadoop/fs/FsShellPermissions.java

@@ -21,6 +21,8 @@ import java.io.IOException;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FsShell.CmdHandler;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.ChmodParser;
@@ -30,6 +32,8 @@ import org.apache.hadoop.fs.permission.ChmodParser;
  * This class is the home for file permissions related commands.
  * Moved to this separate class since FsShell is getting too large.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class FsShellPermissions {
   
   /*========== chmod ==========*/

+ 4 - 0
src/java/org/apache/hadoop/fs/FsStatus.java

@@ -21,11 +21,15 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 
 /** This class is used to represent the capacity, free and used space on a
   * {@link FileSystem}.
   */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FsStatus implements Writable {
   private long capacity;
   private long used;

+ 4 - 0
src/java/org/apache/hadoop/fs/FsUrlConnection.java

@@ -23,11 +23,15 @@ import java.net.URISyntaxException;
 import java.net.URL;
 import java.net.URLConnection;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /**
  * Representation of a URL connection to open InputStreams.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class FsUrlConnection extends URLConnection {
 
   private Configuration conf;

+ 4 - 0
src/java/org/apache/hadoop/fs/FsUrlStreamHandler.java

@@ -21,12 +21,16 @@ import java.io.IOException;
 import java.net.URL;
 import java.net.URLStreamHandler;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /**
  * URLStream handler relying on FileSystem and on a given Configuration to
  * handle URL protocols.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class FsUrlStreamHandler extends URLStreamHandler {
 
   private Configuration conf;

+ 4 - 0
src/java/org/apache/hadoop/fs/FsUrlStreamHandlerFactory.java

@@ -21,6 +21,8 @@ import java.net.URLStreamHandlerFactory;
 import java.util.HashMap;
 import java.util.Map;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /**
@@ -33,6 +35,8 @@ import org.apache.hadoop.conf.Configuration;
  * Before returning our handler, we make sure that FileSystem knows an
  * implementation for the requested scheme/protocol.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class FsUrlStreamHandlerFactory implements
     URLStreamHandlerFactory {
 

+ 5 - 0
src/java/org/apache/hadoop/fs/GlobExpander.java

@@ -21,6 +21,11 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class GlobExpander {
   
   static class StringWithOffset {

+ 4 - 0
src/java/org/apache/hadoop/fs/InvalidPathException.java

@@ -18,11 +18,15 @@
 package org.apache.hadoop.fs;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 
 /**
  * Path string is invalid either because it has invalid characters or due to
  * other file system specific reasons.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class InvalidPathException extends HadoopIllegalArgumentException {
   private static final long serialVersionUID = 1L;
 

+ 4 - 0
src/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -27,6 +27,8 @@ import org.apache.hadoop.util.*;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration; 
 
 /** An implementation of a round-robin scheme for disk allocation for creating
@@ -60,6 +62,8 @@ import org.apache.hadoop.conf.Configuration;
  * actually points to the configured directory on the Disk which will be the
  * parent for all file write/read allocations.
  */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
 public class LocalDirAllocator {
   
   //A Map from the config item names like "mapred.local.dir", 

+ 5 - 0
src/java/org/apache/hadoop/fs/LocalFileSystem.java

@@ -22,10 +22,15 @@ import java.io.*;
 import java.net.URI;
 import java.util.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /****************************************************************
  * Implement the FileSystem API for the checksumed local filesystem.
  *
  *****************************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class LocalFileSystem extends ChecksumFileSystem {
   static final URI NAME = URI.create("file:///");
   static private Random rand = new Random();

+ 5 - 1
src/java/org/apache/hadoop/fs/LocalFileSystemConfigKeys.java

@@ -18,12 +18,16 @@
 
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** 
  * This class contains constants for configuration keys used
  * in the local file system, raw local fs and checksum fs.
  *
  */
-
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class LocalFileSystemConfigKeys extends CommonConfigurationKeys {
   public static final String  LOCAL_FS_BLOCK_SIZE_KEY = "file.blocksize";
   public static final long    LOCAL_FS_BLOCK_SIZE_DEFAULT = 64*1024*1024;

+ 4 - 0
src/java/org/apache/hadoop/fs/MD5MD5CRC32FileChecksum.java

@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.WritableUtils;
 import org.xml.sax.Attributes;
@@ -28,6 +30,8 @@ import org.xml.sax.SAXException;
 import org.znerd.xmlenc.XMLOutputter;
 
 /** MD5 of MD5 of CRC32. */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
 public class MD5MD5CRC32FileChecksum extends FileChecksum {
   public static final int LENGTH = MD5Hash.MD5_LEN
       + (Integer.SIZE + Long.SIZE)/Byte.SIZE;

+ 4 - 0
src/java/org/apache/hadoop/fs/Options.java

@@ -17,12 +17,16 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
 
 /**
  * This class contains options related to file system operations.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public final class Options {
   /**
    * Class to support the varargs for create() options.

+ 5 - 0
src/java/org/apache/hadoop/fs/ParentNotDirectoryException.java

@@ -19,10 +19,15 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Indicates that the parent of specified Path is not a directory
  * as expected.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class ParentNotDirectoryException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 5 - 0
src/java/org/apache/hadoop/fs/Path.java

@@ -22,6 +22,8 @@ import java.net.*;
 import java.io.*;
 import org.apache.avro.reflect.Stringable;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /** Names a file or directory in a {@link FileSystem}.
@@ -29,6 +31,8 @@ import org.apache.hadoop.conf.Configuration;
  * absolute if it begins with a slash.
  */
 @Stringable
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class Path implements Comparable {
 
   /** The directory separator, a slash. */
@@ -306,6 +310,7 @@ public class Path implements Comparable {
   
   
   /** Returns a qualified path object. */
+  @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
   public Path makeQualified(URI defaultUri, Path workingDir ) {
     Path path = this;
     if (!isAbsolute()) {

+ 5 - 0
src/java/org/apache/hadoop/fs/PathFilter.java

@@ -17,6 +17,11 @@
  */
 package org.apache.hadoop.fs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public interface PathFilter {
   /**
    * Tests whether or not the specified abstract pathname should be

+ 5 - 0
src/java/org/apache/hadoop/fs/PositionedReadable.java

@@ -18,9 +18,14 @@
 package org.apache.hadoop.fs;
 
 import java.io.*;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.*;
 
 /** Stream that permits positional reading. */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public interface PositionedReadable {
   /**
    * Read upto the specified number of bytes, from a given

+ 4 - 0
src/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -31,6 +31,8 @@ import java.nio.ByteBuffer;
 import java.util.EnumSet;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Progressable;
@@ -41,6 +43,8 @@ import org.apache.hadoop.util.StringUtils;
  * Implement the FileSystem API for the raw local filesystem.
  *
  *****************************************************************/
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class RawLocalFileSystem extends FileSystem {
   static final URI NAME = URI.create("file:///");
   private Path workingDir;

+ 6 - 0
src/java/org/apache/hadoop/fs/Seekable.java

@@ -19,7 +19,12 @@ package org.apache.hadoop.fs;
 
 import java.io.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** Stream that permits seeking. */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public interface Seekable {
   /**
    * Seek to the given offset from the start of the file.
@@ -37,5 +42,6 @@ public interface Seekable {
    * Seeks a different copy of the data.  Returns true if 
    * found a new source, false otherwise.
    */
+  @InterfaceAudience.Private
   boolean seekToNewSource(long targetPos) throws IOException;
 }

+ 5 - 0
src/java/org/apache/hadoop/fs/Syncable.java

@@ -20,7 +20,12 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** This interface for flush/sync operation. */
+@InterfaceAudience.Public
+@InterfaceStability.Evolving
 public interface Syncable {
   /**
    * @deprecated As of HADOOP 0.21.0, replaced by hflush

+ 4 - 0
src/java/org/apache/hadoop/fs/Trash.java

@@ -26,6 +26,8 @@ import java.util.Date;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.permission.FsAction;
@@ -41,6 +43,8 @@ import org.apache.hadoop.util.StringUtils;
  * content, without date support in the filesystem, and without clock
  * synchronization.)
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class Trash extends Configured {
   private static final Log LOG =
     LogFactory.getLog(Trash.class);

+ 2 - 0
src/java/org/apache/hadoop/fs/UnresolvedLinkException.java

@@ -21,11 +21,13 @@ package org.apache.hadoop.fs;
 import java.io.IOException;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 
 /** 
  * Thrown when a symbolic link is encountered in a path.
  */
 @InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Stable
 public class UnresolvedLinkException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 5 - 0
src/java/org/apache/hadoop/fs/UnsupportedFileSystemException.java

@@ -19,9 +19,14 @@ package org.apache.hadoop.fs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * File system for a given file system name/scheme is not supported
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class UnsupportedFileSystemException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 5 - 0
src/java/org/apache/hadoop/fs/ftp/FTPException.java

@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.fs.ftp;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * A class to wrap a {@link Throwable} into a Runtime Exception.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FTPException extends RuntimeException {
 
   private static final long serialVersionUID = 1L;

+ 4 - 0
src/java/org/apache/hadoop/fs/ftp/FTPFileSystem.java

@@ -29,6 +29,8 @@ import org.apache.commons.net.ftp.FTP;
 import org.apache.commons.net.ftp.FTPClient;
 import org.apache.commons.net.ftp.FTPFile;
 import org.apache.commons.net.ftp.FTPReply;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -46,6 +48,8 @@ import org.apache.hadoop.util.Progressable;
  * href="http://commons.apache.org/net/">Apache Commons Net</a>.
  * </p>
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FTPFileSystem extends FileSystem {
 
   public static final Log LOG = LogFactory

+ 4 - 0
src/java/org/apache/hadoop/fs/ftp/FTPInputStream.java

@@ -21,9 +21,13 @@ import java.io.IOException;
 import java.io.InputStream;
 
 import org.apache.commons.net.ftp.FTPClient;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileSystem;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class FTPInputStream extends FSInputStream {
 
   InputStream wrappedStream;

+ 4 - 1
src/java/org/apache/hadoop/fs/kfs/KFSConfigKeys.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.kfs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 
 /** 
@@ -25,7 +27,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
  * in the kfs file system. 
  *
  */
-
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class KFSConfigKeys extends CommonConfigurationKeys {
   public static final String  KFS_BLOCK_SIZE_KEY = "kfs.blocksize";
   public static final long    KFS_BLOCK_SIZE_DEFAULT = 64*1024*1024;

+ 4 - 0
src/java/org/apache/hadoop/fs/kfs/KFSImpl.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.fs.kfs;
 
 import java.io.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
@@ -30,6 +32,8 @@ import org.kosmix.kosmosfs.access.KfsAccess;
 import org.kosmix.kosmosfs.access.KfsFileAttr;
 import org.apache.hadoop.util.Progressable;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class KFSImpl implements IFSImpl {
     private KfsAccess kfsAccess = null;
     private FileSystem.Statistics statistics;

+ 4 - 0
src/java/org/apache/hadoop/fs/kfs/KFSInputStream.java

@@ -22,12 +22,16 @@ package org.apache.hadoop.fs.kfs;
 import java.io.*;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FSInputStream;
 
 import org.kosmix.kosmosfs.access.KfsAccess;
 import org.kosmix.kosmosfs.access.KfsInputChannel;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class KFSInputStream extends FSInputStream {
 
     private KfsInputChannel kfsChannel;

+ 4 - 0
src/java/org/apache/hadoop/fs/kfs/KFSOutputStream.java

@@ -24,6 +24,8 @@ import java.net.*;
 import java.util.*;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.FSDataOutputStream;
@@ -32,6 +34,8 @@ import org.apache.hadoop.util.Progressable;
 import org.kosmix.kosmosfs.access.KfsAccess;
 import org.kosmix.kosmosfs.access.KfsOutputChannel;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class KFSOutputStream extends OutputStream {
 
     private String path;

+ 4 - 1
src/java/org/apache/hadoop/fs/kfs/KosmosFileSystem.java

@@ -24,6 +24,8 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.EnumSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.CreateFlag;
@@ -40,7 +42,8 @@ import org.apache.hadoop.util.Progressable;
  * A FileSystem backed by KFS.
  *
  */
-
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class KosmosFileSystem extends FileSystem {
 
     private FileSystem localFs;

+ 22 - 0
src/java/org/apache/hadoop/fs/local/package-info.java

@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.local;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
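
The hunk above is one of the new package-info.java files that annotate an entire package at once. Note the legal but easily missed ordering: the package-level annotations precede the package declaration, and the imports that resolve them follow it. A sketch of the same shape for a hypothetical package (org.example.demo is illustrative only):

    @InterfaceAudience.Private
    @InterfaceStability.Unstable
    package org.example.demo;

    import org.apache.hadoop.classification.InterfaceAudience;
    import org.apache.hadoop.classification.InterfaceStability;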

+ 5 - 0
src/java/org/apache/hadoop/fs/permission/AccessControlException.java

@@ -19,12 +19,17 @@ package org.apache.hadoop.fs.permission;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * An exception class for access control related issues.
  * @deprecated Use {@link org.apache.hadoop.security.AccessControlException} 
  *             instead.
  */
 @Deprecated
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class AccessControlException extends IOException {
   //Required by {@link java.io.Serializable}.
   private static final long serialVersionUID = 1L;

+ 4 - 0
src/java/org/apache/hadoop/fs/permission/ChmodParser.java

@@ -19,12 +19,16 @@ package org.apache.hadoop.fs.permission;
 
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.FileStatus;
 
 /**
  * Parse a permission mode passed in from a chmod command and apply that
  * mode against an existing file.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class ChmodParser extends PermissionParser {
   private static Pattern chmodOctalPattern =
     Pattern.compile("^\\s*[+]?([01]?)([0-7]{3})\\s*$");

+ 5 - 0
src/java/org/apache/hadoop/fs/permission/FsAction.java

@@ -17,9 +17,14 @@
  */
 package org.apache.hadoop.fs.permission;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * File system actions, e.g. read, write, etc.
  */
+@InterfaceAudience.LimitedPrivate({"HDFS"})
+@InterfaceStability.Unstable
 public enum FsAction {
   // POSIX style
   NONE("---"),

+ 4 - 0
src/java/org/apache/hadoop/fs/permission/FsPermission.java

@@ -23,6 +23,8 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.io.Writable;
@@ -32,6 +34,8 @@ import org.apache.hadoop.io.WritableFactory;
 /**
  * A class for file/directory permissions.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class FsPermission implements Writable {
   private static final Log LOG = LogFactory.getLog(FsPermission.class);
 

+ 5 - 0
src/java/org/apache/hadoop/fs/permission/PermissionParser.java

@@ -20,11 +20,16 @@ package org.apache.hadoop.fs.permission;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Base class for parsing either chmod permissions or umask permissions.
  * Includes common code needed by either operation as implemented in
  * UmaskParser and ChmodParser classes.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class PermissionParser {
   protected boolean symbolic = false;
   protected short userMode;

+ 4 - 0
src/java/org/apache/hadoop/fs/permission/PermissionStatus.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.fs.permission;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.*;
 
 import java.io.DataInput;
@@ -26,6 +28,8 @@ import java.io.IOException;
 /**
  * Store permission related information.
  */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
 public class PermissionStatus implements Writable {
   static final WritableFactory FACTORY = new WritableFactory() {
     public Writable newInstance() { return new PermissionStatus(); }

+ 5 - 0
src/java/org/apache/hadoop/fs/permission/UmaskParser.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.fs.permission;
 
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Parse umask value provided as a string, either in octal or symbolic
  * format and return it as a short value. Umask values are slightly
@@ -26,6 +29,8 @@ import java.util.regex.Pattern;
  * or X.
  *
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class UmaskParser extends PermissionParser {
   private static Pattern chmodOctalPattern =
     Pattern.compile("^\\s*[+]?()([0-7]{3})\\s*$"); // no leading 1 for sticky bit

+ 5 - 0
src/java/org/apache/hadoop/fs/s3/Block.java

@@ -18,9 +18,14 @@
 
 package org.apache.hadoop.fs.s3;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Holds metadata about a block of data being stored in a {@link FileSystemStore}.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class Block {
   private long id;
 

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/FileSystemStore.java

@@ -23,12 +23,16 @@ import java.io.IOException;
 import java.net.URI;
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 
 /**
  * A facility for storing and retrieving {@link INode}s and {@link Block}s.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public interface FileSystemStore {
   
   void initialize(URI uri, Configuration conf) throws IOException;

+ 5 - 0
src/java/org/apache/hadoop/fs/s3/INode.java

@@ -25,10 +25,15 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.InputStream;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Holds file metadata including type (regular file, or directory),
  * and the list of blocks that are pointers to the data.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class INode {
 	
   enum FileType {

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java

@@ -35,6 +35,8 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3.INode.FileType;
@@ -45,6 +47,8 @@ import org.jets3t.service.model.S3Bucket;
 import org.jets3t.service.model.S3Object;
 import org.jets3t.service.security.AWSCredentials;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class Jets3tFileSystemStore implements FileSystemStore {
   
   private static final String FILE_SYSTEM_NAME = "fs";

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/MigrationTool.java

@@ -26,6 +26,8 @@ import java.net.URLEncoder;
 import java.util.Set;
 import java.util.TreeSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.util.Tool;
@@ -47,6 +49,8 @@ import org.jets3t.service.security.AWSCredentials;
  * - no datafiles are touched.
  * </p>
  */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
 public class MigrationTool extends Configured implements Tool {
   
   private S3Service s3Service;
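
Because MigrationTool implements Tool, it is launched through the standard ToolRunner entry point, which strips generic options (-D key=value and friends) before calling run(String[]). A minimal sketch; the s3://bucket argument is a placeholder, not taken from this diff:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3.MigrationTool;
    import org.apache.hadoop.util.ToolRunner;

    public class RunMigration {
      public static void main(String[] args) throws Exception {
        // "s3://bucket" is a placeholder argument for illustration.
        int exitCode = ToolRunner.run(new Configuration(),
                                      new MigrationTool(),
                                      new String[] { "s3://bucket" });
        System.exit(exitCode);
      }
    }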

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/S3Credentials.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.fs.s3;
 
 import java.net.URI;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /**
@@ -27,6 +29,8 @@ import org.apache.hadoop.conf.Configuration;
  * Extracts AWS credentials from the filesystem URI or configuration.
  * </p>
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class S3Credentials {
   
   private String accessKey;
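
Credentials can ride along in the URI (s3://id:secret@bucket) or come from configuration. A sketch of the configuration route; the fs.s3.awsAccessKeyId / fs.s3.awsSecretAccessKey property names are the ones used by the s3 scheme, and the values here are placeholders:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3.S3Credentials;

    public class CredentialsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Supplying credentials via configuration instead of s3://id:secret@bucket.
        conf.set("fs.s3.awsAccessKeyId", "ACCESS");      // placeholder value
        conf.set("fs.s3.awsSecretAccessKey", "SECRET");  // placeholder value
        S3Credentials creds = new S3Credentials();
        creds.initialize(URI.create("s3://bucket"), conf);
        System.out.println(creds.getAccessKey());
      }
    }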

+ 5 - 0
src/java/org/apache/hadoop/fs/s3/S3Exception.java

@@ -20,9 +20,14 @@ package org.apache.hadoop.fs.s3;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Thrown if there is a problem communicating with Amazon S3.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class S3Exception extends IOException {
 
   private static final long serialVersionUID = 1L;

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/S3FileSystem.java

@@ -28,6 +28,8 @@ import java.util.List;
 import java.util.Map;
 import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataInputStream;
@@ -49,6 +51,8 @@ import org.apache.hadoop.util.Progressable;
  * </p>
  * @see NativeS3FileSystem
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class S3FileSystem extends FileSystem {
 
   private URI uri;
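
Being a Public/Stable FileSystem, S3FileSystem is normally obtained through the generic FileSystem factory rather than instantiated directly. A minimal sketch, assuming credentials are already configured as shown for S3Credentials above:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class S3FsDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // "s3://bucket/" is a placeholder URI; the scheme selects the
        // block-based S3 implementation.
        FileSystem fs = FileSystem.get(URI.create("s3://bucket/"), conf);
        for (org.apache.hadoop.fs.FileStatus st : fs.listStatus(new Path("/"))) {
          System.out.println(st.getPath());
        }
      }
    }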

+ 4 - 1
src/java/org/apache/hadoop/fs/s3/S3FileSystemConfigKeys.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.s3;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 
 /** 
@@ -25,7 +27,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
  * in the s3 file system. 
  *
  */
-
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class S3FileSystemConfigKeys extends CommonConfigurationKeys {
   public static final String  S3_BLOCK_SIZE_KEY = "s3.blocksize";
   public static final long    S3_BLOCK_SIZE_DEFAULT = 64*1024*1024;
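
These key classes follow the usual pattern: the *_KEY constant names the property and *_DEFAULT supplies the fallback when the property is unset. A short sketch of overriding and reading one:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.s3.S3FileSystemConfigKeys;

    public class BlockSizeDemo {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        conf.setLong(S3FileSystemConfigKeys.S3_BLOCK_SIZE_KEY, 128L * 1024 * 1024);
        long blockSize = conf.getLong(S3FileSystemConfigKeys.S3_BLOCK_SIZE_KEY,
                                      S3FileSystemConfigKeys.S3_BLOCK_SIZE_DEFAULT);
        System.out.println(blockSize); // 134217728 rather than the 64 MB default
      }
    }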

+ 5 - 0
src/java/org/apache/hadoop/fs/s3/S3FileSystemException.java

@@ -19,9 +19,14 @@ package org.apache.hadoop.fs.s3;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Thrown when there is a fatal exception while using {@link S3FileSystem}.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class S3FileSystemException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -25,10 +25,14 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.FileSystem;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class S3InputStream extends FSInputStream {
 
   private FileSystemStore store;

+ 4 - 0
src/java/org/apache/hadoop/fs/s3/S3OutputStream.java

@@ -28,11 +28,15 @@ import java.util.Random;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.s3.INode.FileType;
 import org.apache.hadoop.util.Progressable;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class S3OutputStream extends OutputStream {
 
   private Configuration conf;

+ 5 - 0
src/java/org/apache/hadoop/fs/s3/VersionMismatchException.java

@@ -17,10 +17,15 @@
  */
 package org.apache.hadoop.fs.s3;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Thrown when Hadoop cannot read the version of the data stored
  * in {@link S3FileSystem}.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class VersionMismatchException extends S3FileSystemException {
   private static final long serialVersionUID = 1L;
 

+ 5 - 0
src/java/org/apache/hadoop/fs/s3native/FileMetadata.java

@@ -18,11 +18,16 @@
 
 package org.apache.hadoop.fs.s3native;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * <p>
  * Holds basic metadata for a file stored in a {@link NativeFileSystemStore}.
  * </p>
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class FileMetadata {
   private final String key;
   private final long length;

+ 4 - 0
src/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java

@@ -29,6 +29,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.s3.S3Credentials;
 import org.apache.hadoop.fs.s3.S3Exception;
@@ -40,6 +42,8 @@ import org.jets3t.service.model.S3Bucket;
 import org.jets3t.service.model.S3Object;
 import org.jets3t.service.security.AWSCredentials;
 
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class Jets3tNativeFileSystemStore implements NativeFileSystemStore {
   
   private S3Service s3Service;

+ 4 - 0
src/java/org/apache/hadoop/fs/s3native/NativeFileSystemStore.java

@@ -23,6 +23,8 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.net.URI;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /**
@@ -30,6 +32,8 @@ import org.apache.hadoop.conf.Configuration;
  * An abstraction for a key-based {@link File} store.
  * </p>
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 interface NativeFileSystemStore {
   
   void initialize(URI uri, Configuration conf) throws IOException;

+ 4 - 0
src/java/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -40,6 +40,8 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BufferedFSInputStream;
 import org.apache.hadoop.fs.CreateFlag;
@@ -77,6 +79,8 @@ import org.apache.hadoop.util.Progressable;
  * </p>
  * @see org.apache.hadoop.fs.s3.S3FileSystem
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class NativeS3FileSystem extends FileSystem {
   
   public static final Log LOG = 
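
NativeS3FileSystem is the flat-namespace counterpart of the block-based S3FileSystem and is addressed with the s3n scheme; objects it writes are plain S3 objects readable by other S3 tools. A minimal sketch with a placeholder bucket, credentials assumed configured under the corresponding fs.s3n.* properties:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class NativeS3Demo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        // Unlike the s3:// block format, the object written here is
        // directly usable by other S3 clients.
        FileSystem fs = FileSystem.get(URI.create("s3n://bucket/"), conf);
        FSDataOutputStream out = fs.create(new Path("/demo.txt"));
        out.writeBytes("hello\n");
        out.close();
      }
    }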

+ 5 - 0
src/java/org/apache/hadoop/fs/s3native/PartialListing.java

@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.fs.s3native;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * <p>
  * Holds information on a directory listing for a
@@ -31,6 +34,8 @@ package org.apache.hadoop.fs.s3native;
  * </p>
  * @see NativeFileSystemStore#list(String, int, String)
  */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 class PartialListing {
   
   private final String priorLastKey;

+ 4 - 1
src/java/org/apache/hadoop/fs/s3native/S3NativeFileSystemConfigKeys.java

@@ -18,6 +18,8 @@
 
 package org.apache.hadoop.fs.s3native;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 
 /** 
@@ -25,7 +27,8 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
  * in the s3 file system. 
  *
  */
-
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
 public class S3NativeFileSystemConfigKeys extends CommonConfigurationKeys {
   public static final String  S3_NATIVE_BLOCK_SIZE_KEY = "s3native.blocksize";
   public static final long    S3_NATIVE_BLOCK_SIZE_DEFAULT = 64*1024*1024;

+ 23 - 0
src/java/org/apache/hadoop/fs/shell/package-info.java

@@ -0,0 +1,23 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Unstable
+package org.apache.hadoop.fs.shell;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
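
A note on the shape of these new package-info.java files: Java allows package-level annotations only in package-info.java, placed immediately before the package declaration, while the imports those annotations rely on must still follow the declaration. That is why the import statements come last in both hunks.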

+ 22 - 0
src/java/org/apache/hadoop/http/package-info.java

@@ -0,0 +1,22 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+@InterfaceAudience.LimitedPrivate({"HBase", "HDFS", "MapReduce"})
+@InterfaceStability.Unstable
+package org.apache.hadoop.http;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;

+ 4 - 0
src/java/org/apache/hadoop/io/AbstractMapWritable.java

@@ -24,6 +24,8 @@ import java.util.Map;
 import java.util.concurrent.ConcurrentHashMap;
 import java.util.concurrent.atomic.AtomicReference;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 
@@ -37,6 +39,8 @@ import org.apache.hadoop.conf.Configuration;
  * Class ids range from 1 to 127 so there can be at most 127 distinct classes
  * in any specific map instance.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public abstract class AbstractMapWritable implements Writable, Configurable {
   private AtomicReference<Configuration> conf;
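
The 1-127 class-id budget is easiest to see through MapWritable, the concrete subclass most callers use: every distinct Writable class stored in a given map instance occupies one id, regardless of how many entries use it. A short sketch:

    import org.apache.hadoop.io.IntWritable;
    import org.apache.hadoop.io.MapWritable;
    import org.apache.hadoop.io.Text;

    public class MapWritableDemo {
      public static void main(String[] args) {
        MapWritable map = new MapWritable();
        // Text and IntWritable each occupy one class id for this map
        // instance; further entries of the same classes cost nothing.
        map.put(new Text("answer"), new IntWritable(42));
        map.put(new Text("question"), new Text("unknown"));
        System.out.println(map.get(new Text("answer")));
      }
    }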
   

+ 4 - 0
src/java/org/apache/hadoop/io/ArrayFile.java

@@ -20,12 +20,16 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.*;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 
 
 /** A dense file-based mapping from integers to values. */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class ArrayFile extends MapFile {
 
   protected ArrayFile() {}                            // no public ctor
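
ArrayFile is a MapFile with implicit LongWritable keys: writes append at the next index and reads are positional. A sketch using the nested Writer/Reader; the constructor signatures below match this era of the API but should be treated as assumptions:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.io.ArrayFile;
    import org.apache.hadoop.io.Text;

    public class ArrayFileDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        // Append values; the index (0, 1, 2, ...) is implicit.
        ArrayFile.Writer writer =
            new ArrayFile.Writer(conf, fs, "demo.array", Text.class);
        writer.append(new Text("zero"));
        writer.append(new Text("one"));
        writer.close();

        // Positional read by long index.
        ArrayFile.Reader reader = new ArrayFile.Reader(fs, "demo.array", conf);
        Text value = new Text();
        reader.get(1, value);
        System.out.println(value); // "one"
        reader.close();
      }
    }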

+ 5 - 0
src/java/org/apache/hadoop/io/ArrayWritable.java

@@ -21,6 +21,9 @@ package org.apache.hadoop.io;
 import java.io.*;
 import java.lang.reflect.Array;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** 
  * A Writable for arrays containing instances of a class. The elements of this
  * writable must all be instances of the same class. If this writable will be
@@ -36,6 +39,8 @@ import java.lang.reflect.Array;
  * }
  * </code>
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class ArrayWritable implements Writable {
   private Class<? extends Writable> valueClass;
   private Writable[] values;
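
The subclassing requirement from the Javadoc above looks like this in practice: the subclass pins the element class and supplies the no-arg constructor that SequenceFile needs to instantiate keys and values reflectively:

    import org.apache.hadoop.io.ArrayWritable;
    import org.apache.hadoop.io.Text;

    // Required when this type is used as a SequenceFile key or value:
    // the no-arg constructor fixes the element class to Text.
    public class TextArrayWritable extends ArrayWritable {
      public TextArrayWritable() {
        super(Text.class);
      }
    }

An instance is then populated via set(Writable[]) and read back element-wise with get().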

+ 5 - 0
src/java/org/apache/hadoop/io/BinaryComparable.java

@@ -18,10 +18,15 @@
 
 package org.apache.hadoop.io;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Interface supported by {@link org.apache.hadoop.io.WritableComparable}
  * types supporting ordering/permutation by a representative set of bytes.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public abstract class BinaryComparable implements Comparable<BinaryComparable> {
 
   /**
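
A subclass only has to expose its raw bytes; the byte-wise ordering itself is inherited from BinaryComparable, which compares the ranges reported by getBytes()/getLength(). A minimal sketch for a type fully described by a byte array:

    import org.apache.hadoop.io.BinaryComparable;

    // Ordering comes for free from BinaryComparable's compareTo, which
    // lexicographically compares the bytes exposed below.
    public class RawBytes extends BinaryComparable {
      private final byte[] bytes;

      public RawBytes(byte[] bytes) {
        this.bytes = bytes;
      }

      @Override
      public byte[] getBytes() { return bytes; }

      @Override
      public int getLength() { return bytes.length; }
    }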

+ 4 - 0
src/java/org/apache/hadoop/io/BloomMapFile.java

@@ -24,6 +24,8 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -42,6 +44,8 @@ import org.apache.hadoop.util.hash.Hash;
  * {@link Reader#get(WritableComparable, Writable)} operation, especially in
  * case of sparsely populated MapFile-s.
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class BloomMapFile {
   private static final Log LOG = LogFactory.getLog(BloomMapFile.class);
   public static final String BLOOM_FILE_NAME = "bloom";
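
The speed-up comes from consulting the dynamic Bloom filter before the MapFile index: a negative membership test skips the disk lookup entirely, at the cost of occasional false positives. A hedged sketch; the nested Writer/Reader signatures are assumed to mirror MapFile's and may differ in your version:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.io.BloomMapFile;
    import org.apache.hadoop.io.Text;

    public class BloomMapFileDemo {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.getLocal(conf);

        BloomMapFile.Writer writer = new BloomMapFile.Writer(
            conf, fs, "demo.bloommap", Text.class, Text.class);
        writer.append(new Text("k"), new Text("v")); // keys must arrive sorted
        writer.close();

        BloomMapFile.Reader reader =
            new BloomMapFile.Reader(fs, "demo.bloommap", conf);
        Text value = new Text();
        // get() consults the Bloom filter first; a filter miss returns
        // null without touching the underlying MapFile index.
        System.out.println(reader.get(new Text("k"), value));      // v
        System.out.println(reader.get(new Text("absent"), value)); // null
        reader.close();
      }
    }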

+ 5 - 0
src/java/org/apache/hadoop/io/BooleanWritable.java

@@ -20,9 +20,14 @@ package org.apache.hadoop.io;
 
 import java.io.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** 
  * A WritableComparable for booleans. 
  */
+@InterfaceAudience.Public
+@InterfaceStability.Stable
 public class BooleanWritable implements WritableComparable {
   private boolean value;
 

+ 5 - 0
src/java/org/apache/hadoop/io/BoundedByteArrayOutputStream.java

@@ -21,11 +21,16 @@ import java.io.EOFException;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * A byte array backed output stream with a limit. The limit should be smaller
  * than the buffer capacity. The object can be reused through <code>reset</code>
  * API and choose different limits in each round.
  */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Unstable
 public class BoundedByteArrayOutputStream extends OutputStream {
   private final byte[] buffer;
   private int limit;
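
Reuse works by resetting the stream, optionally with a different limit, while keeping the same backing buffer; writing past the current limit raises EOFException (hence the import in the hunk above). A sketch, with reset(int) and size() assumed from this class's API:

    import java.io.IOException;
    import org.apache.hadoop.io.BoundedByteArrayOutputStream;

    public class BoundedStreamDemo {
      public static void main(String[] args) throws IOException {
        // Capacity 64; writes beyond the current limit throw EOFException.
        BoundedByteArrayOutputStream out = new BoundedByteArrayOutputStream(64);
        out.write("round one".getBytes());
        System.out.println(out.size()); // bytes written so far

        // Reuse the same backing buffer with a tighter limit for round two.
        out.reset(16);
        out.write("round two".getBytes());
        System.out.println(out.size());
      }
    }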

Some files were not shown because too many files changed in this diff.