
HDFS-752. Add interfaces classification to HDFS source code. Contributed by Suresh Srinivas.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@956155 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 15 years ago
commit 374b2da05c
100 changed files with 311 additions and 19 deletions
  1. CHANGES.txt (+5 -3)
  2. src/ant/org/apache/hadoop/ant/DfsTask.java (+2 -2)
  3. src/java/org/apache/hadoop/fs/Hdfs.java (+4 -0)
  4. src/java/org/apache/hadoop/hdfs/BlockMissingException.java (+5 -0)
  5. src/java/org/apache/hadoop/hdfs/BlockReader.java (+3 -1)
  6. src/java/org/apache/hadoop/hdfs/DFSClient.java (+3 -0)
  7. src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java (+2 -0)
  8. src/java/org/apache/hadoop/hdfs/DFSUtil.java (+3 -0)
  9. src/java/org/apache/hadoop/hdfs/DeprecatedUTF8.java (+3 -0)
  10. src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java (+5 -0)
  11. src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java (+2 -0)
  12. src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java (+3 -0)
  13. src/java/org/apache/hadoop/hdfs/HftpFileSystem.java (+4 -0)
  14. src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java (+4 -0)
  15. src/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java (+5 -0)
  16. src/java/org/apache/hadoop/hdfs/protocol/Block.java (+4 -0)
  17. src/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java (+6 -0)
  18. src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java (+4 -0)
  19. src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java (+4 -0)
  20. src/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java (+4 -0)
  21. src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java (+8 -0)
  22. src/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java (+4 -0)
  23. src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java (+4 -0)
  24. src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java (+4 -0)
  25. src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java (+2 -0)
  26. src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java (+4 -0)
  27. src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java (+4 -0)
  28. src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java (+4 -0)
  29. src/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java (+5 -0)
  30. src/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java (+5 -0)
  31. src/java/org/apache/hadoop/hdfs/protocol/RecoveryInProgressException.java (+5 -0)
  32. src/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java (+4 -0)
  33. src/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java (+5 -0)
  34. src/java/org/apache/hadoop/hdfs/security/token/block/BlockKey.java (+3 -1)
  35. src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java (+2 -0)
  36. src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java (+2 -0)
  37. src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java (+2 -0)
  38. src/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java (+3 -1)
  39. src/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java (+5 -0)
  40. src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java (+2 -0)
  41. src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java (+3 -0)
  42. src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java (+3 -0)
  43. src/java/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java (+5 -0)
  44. src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java (+4 -0)
  45. src/java/org/apache/hadoop/hdfs/server/common/Storage.java (+4 -0)
  46. src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java (+2 -0)
  47. src/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java (+2 -0)
  48. src/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java (+2 -0)
  49. src/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java (+2 -0)
  50. src/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java (+2 -0)
  51. src/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java (+2 -0)
  52. src/java/org/apache/hadoop/hdfs/server/common/Util.java (+2 -0)
  53. src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java (+2 -0)
  54. src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java (+2 -0)
  55. src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java (+2 -0)
  56. src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java (+2 -1)
  57. src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java (+2 -0)
  58. src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java (+2 -3)
  59. src/java/org/apache/hadoop/hdfs/server/datanode/Replica.java (+2 -0)
  60. src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java (+2 -0)
  61. src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java (+2 -0)
  62. src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java (+2 -0)
  63. src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java (+2 -0)
  64. src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java (+3 -0)
  65. src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java (+2 -0)
  66. src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java (+2 -0)
  67. src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java (+2 -0)
  68. src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java (+3 -0)
  69. src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java (+2 -0)
  70. src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java (+2 -0)
  71. src/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java (+2 -1)
  72. src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java (+2 -0)
  73. src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java (+5 -0)
  74. src/java/org/apache/hadoop/hdfs/server/namenode/DelegationTokenServlet.java (+2 -0)
  75. src/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java (+3 -0)
  76. src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java (+4 -0)
  77. src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java (+4 -0)
  78. src/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java (+3 -0)
  79. src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java (+2 -0)
  80. src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java (+4 -1)
  81. src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java (+2 -1)
  82. src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java (+2 -2)
  83. src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java (+2 -0)
  84. src/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java (+3 -1)
  85. src/java/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java (+5 -0)
  86. src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java (+2 -0)
  87. src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java (+2 -0)
  88. src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java (+2 -0)
  89. src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (+2 -0)
  90. src/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java (+5 -0)
  91. src/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java (+5 -0)
  92. src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java (+2 -0)
  93. src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java (+3 -1)
  94. src/java/org/apache/hadoop/hdfs/server/namenode/UnsupportedActionException.java (+5 -0)
  95. src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java (+2 -0)
  96. src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java (+3 -0)
  97. src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java (+2 -0)
  98. src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java (+2 -0)
  99. src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java (+2 -0)
  100. src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java (+4 -0)
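
Every hunk below follows the same pattern: the file gains an import of org.apache.hadoop.classification.InterfaceAudience (and, where a stability level is also declared, InterfaceStability), and the top-level class or interface is tagged @InterfaceAudience.Private, in many cases together with @InterfaceStability.Evolving. The annotation types themselves live in Hadoop Common rather than in this patch; the following is only a rough sketch of their shape, assuming the usual Hadoop Common definitions, to make the diffs easier to read:

// Sketch of org/apache/hadoop/classification/InterfaceAudience.java (Hadoop Common, not part of this patch)
package org.apache.hadoop.classification;

import java.lang.annotation.Documented;

public class InterfaceAudience {
  /** Safe for any project or application to depend on. */
  @Documented
  public @interface Public {}

  /** Usable only by the projects named in the value, e.g. {"MapReduce"}. */
  @Documented
  public @interface LimitedPrivate {
    String[] value();
  }

  /** Internal to Hadoop itself; the tag applied throughout this commit. */
  @Documented
  public @interface Private {}

  private InterfaceAudience() {} // only a holder for the nested annotation types
}

// InterfaceStability.java is analogous, holding the nested marker annotations Stable,
// Evolving and Unstable; @InterfaceStability.Evolving, used on many classes below,
// means the API may change incompatibly between minor releases.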

+ 5 - 3
CHANGES.txt

@@ -12,9 +12,6 @@ Trunk (unreleased changes)
 
   IMPROVEMENTS
 
-    HDFS-1110. Reuses objects for commonly used file names in namenode to
-    reduce the heap usage. (suresh)
-
     HDFS-1096. fix for prev. commit. (boryas)
 
     HDFS-1096. allow dfsadmin/mradmin refresh of superuser proxy group
@@ -51,9 +48,14 @@ Trunk (unreleased changes)
     HDFS-1190.  Remove unused getNamenode() method from DataNode.
     (Jeff Ames via jghoman)
 
+    HDFS-1110. Reuses objects for commonly used file names in namenode to
+    reduce the heap usage. (suresh)
+
     HDFS-1114. Implement LightWeightGSet for BlocksMap in order to reduce
     NameNode memory footprint.  (szetszwo)
 
+    HDFS-752. Add interfaces classification to to HDFS source code. (suresh)
+
   BUG FIXES
 
     HDFS-1039. Adding test for  JspHelper.getUGI(jnp via boryas)

+ 2 - 2
src/ant/org/apache/hadoop/ant/DfsTask.java

@@ -30,15 +30,15 @@ import org.apache.hadoop.fs.FsShell;
 import org.apache.tools.ant.AntClassLoader;
 import org.apache.tools.ant.BuildException;
 import org.apache.tools.ant.Task;
-import org.apache.tools.ant.Project;
-import org.apache.tools.ant.types.Path;
 import org.apache.hadoop.util.ToolRunner;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /**
  * {@link org.apache.hadoop.fs.FsShell FsShell} wrapper for ant Task.
  */
+@InterfaceAudience.Private
 public class DfsTask extends Task {
 
   /**

+ 4 - 0
src/java/org/apache/hadoop/fs/Hdfs.java

@@ -27,6 +27,8 @@ import java.util.ArrayList;
 import java.util.EnumSet;
 import java.util.Iterator;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClient;
@@ -37,6 +39,8 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.util.Progressable;
 
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class Hdfs extends AbstractFileSystem {
 
   DFSClient dfs;

+ 5 - 0
src/java/org/apache/hadoop/hdfs/BlockMissingException.java

@@ -20,10 +20,15 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** 
   * This exception is thrown when a read encounters a block that has no locations
   * associated with it.
   */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class BlockMissingException extends IOException {
 
   private static final long serialVersionUID = 1L;

+ 3 - 1
src/java/org/apache/hadoop/hdfs/BlockReader.java

@@ -30,6 +30,7 @@ import java.io.OutputStream;
 import java.net.Socket;
 import java.nio.ByteBuffer;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
@@ -44,6 +45,7 @@ import org.apache.hadoop.util.DataChecksum;
 /** This is a wrapper around connection to datadone
  * and understands checksum, offset etc
  */
+@InterfaceAudience.Private
 public class BlockReader extends FSInputChecker {
 
   Socket dnSock; //for now just sending checksumOk.
@@ -456,4 +458,4 @@ public class BlockReader extends FSInputChecker {
                 ": " + e.getMessage());
     }
   }
-}
+}

+ 3 - 0
src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -44,6 +44,7 @@ import javax.net.SocketFactory;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
@@ -114,6 +115,7 @@ import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
  * filesystem tasks.
  *
  ********************************************************/
+@InterfaceAudience.Private
 public class DFSClient implements FSConstants, java.io.Closeable {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final long SERVER_DEFAULTS_VALIDITY_PERIOD = 60 * 60 * 1000L; // 1 hour
@@ -1439,6 +1441,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
   /**
    * The Hdfs implementation of {@link FSDataInputStream}
    */
+  @InterfaceAudience.Private
   public static class DFSDataInputStream extends FSDataInputStream {
     public DFSDataInputStream(DFSInputStream in)
       throws IOException {

+ 2 - 0
src/java/org/apache/hadoop/hdfs/DFSConfigKeys.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 
 /** 
@@ -26,6 +27,7 @@ import org.apache.hadoop.fs.CommonConfigurationKeys;
  *
  */
 
+@InterfaceAudience.Private
 public class DFSConfigKeys extends CommonConfigurationKeys {
 
   public static final String  DFS_BLOCK_SIZE_KEY = "dfs.blocksize";

+ 3 - 0
src/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -22,10 +22,12 @@ import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.util.StringTokenizer;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 
+@InterfaceAudience.Private
 public class DFSUtil {
   /**
    * Whether the pathname is valid.  Currently prohibits relative paths, 
@@ -55,6 +57,7 @@ public class DFSUtil {
   /**
    * Utility class to facilitate junit test error simulation.
    */
+  @InterfaceAudience.Private
   public static class ErrorSimulator {
     private static boolean[] simulation = null; // error simulation events
     public static void initializeErrorSimulationEvent(int numberOfEvents) {

+ 3 - 0
src/java/org/apache/hadoop/hdfs/DeprecatedUTF8.java

@@ -22,6 +22,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * A simple wrapper around {@link org.apache.hadoop.io.UTF8}.
  * This class should be used only when it is absolutely necessary
@@ -31,6 +33,7 @@ import java.io.IOException;
  * 
  * This should be treated as package private class to HDFS.
  */
+@InterfaceAudience.Private
 @SuppressWarnings("deprecation")
 public class DeprecatedUTF8 extends org.apache.hadoop.io.UTF8 {
   

+ 5 - 0
src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -25,6 +25,8 @@ import java.util.EnumSet;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.*;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -52,6 +54,8 @@ import org.apache.hadoop.fs.Options;
  * DistributedFileSystem.
  *
  *****************************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DistributedFileSystem extends FileSystem {
   private Path workingDir;
   private URI uri;
@@ -413,6 +417,7 @@ public class DistributedFileSystem extends FileSystem {
   }        
   
   /** @deprecated Use {@link org.apache.hadoop.fs.FsStatus} instead */
+  @InterfaceAudience.Private
   @Deprecated
   public static class DiskStatus extends FsStatus {
     public DiskStatus(FsStatus stats) {

+ 2 - 0
src/java/org/apache/hadoop/hdfs/HDFSPolicyProvider.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
@@ -30,6 +31,7 @@ import org.apache.hadoop.security.authorize.Service;
 /**
  * {@link PolicyProvider} for HDFS protocols.
  */
+@InterfaceAudience.Private
 public class HDFSPolicyProvider extends PolicyProvider {
   private static final Service[] hdfsServices =
     new Service[] {

+ 3 - 0
src/java/org/apache/hadoop/hdfs/HdfsConfiguration.java

@@ -20,9 +20,12 @@ package org.apache.hadoop.hdfs;
 
 import org.apache.hadoop.conf.Configuration;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * Adds deprecated keys into the configuration.
  */
+@InterfaceAudience.Private
 public class HdfsConfiguration extends Configuration {
   static {
     addDeprecatedKeys();

+ 4 - 0
src/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -33,6 +33,8 @@ import java.util.EnumSet;
 import java.util.Random;
 import java.util.TimeZone;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -63,6 +65,8 @@ import org.xml.sax.helpers.XMLReaderFactory;
  * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
  * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class HftpFileSystem extends FileSystem {
   static {
     HttpURLConnection.setFollowRedirects(true);

+ 4 - 0
src/java/org/apache/hadoop/hdfs/HsftpFileSystem.java

@@ -37,6 +37,8 @@ import javax.net.ssl.TrustManager;
 import javax.net.ssl.TrustManagerFactory;
 import javax.net.ssl.X509TrustManager;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 
 /**
@@ -47,6 +49,8 @@ import org.apache.hadoop.conf.Configuration;
  * @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
  * @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class HsftpFileSystem extends HftpFileSystem {
 
   private static final long MM_SECONDS_PER_DAY = 1000 * 60 * 60 * 24;

+ 5 - 0
src/java/org/apache/hadoop/hdfs/protocol/AlreadyBeingCreatedException.java

@@ -20,10 +20,15 @@ package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * The exception that happens when you ask to create a file that already
  * is being created, but is not closed yet.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class AlreadyBeingCreatedException extends IOException {
   static final long serialVersionUID = 0x12308AD009L;
   public AlreadyBeingCreatedException(String msg) {

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/Block.java

@@ -21,6 +21,8 @@ import java.io.*;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.io.*;
 
@@ -29,6 +31,8 @@ import org.apache.hadoop.io.*;
  * long.
  *
  **************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class Block implements Writable, Comparable<Block> {
   public static final String BLOCK_FILE_PREFIX = "blk_";
   public static final String METADATA_EXTENSION = ".meta";

+ 6 - 0
src/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.protocol;
 import java.util.Iterator;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
 
@@ -40,6 +42,8 @@ import org.apache.hadoop.hdfs.server.datanode.ReplicaInfo;
  *   represented by 4 longs: three for the block id, length, generation 
  *   stamp, and the forth for the replica state.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class BlockListAsLongs implements Iterable<Block> {
   /**
    * A finalized block as 3 longs
@@ -131,6 +135,8 @@ public class BlockListAsLongs implements Iterable<Block> {
    * Iterates over blocks in the block report.
    * Avoids object allocation on each iteration.
    */
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
   public class BlockReportIterator implements Iterator<Block> {
     private int currentBlockIndex;
     private Block block;

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

@@ -21,12 +21,16 @@ import java.io.IOException;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSelector;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.token.TokenInfo;
 
 /** An client-datanode protocol for block recovery
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 @TokenInfo(BlockTokenSelector.class)
 public interface ClientDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class);

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -22,6 +22,8 @@ import java.io.IOException;
 
 import org.apache.avro.reflect.Nullable;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
@@ -55,6 +57,8 @@ import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSelector;
  * as well as open/close file streams, etc.
  *
  **********************************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 @KerberosInfo(
     serverPrincipal = DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 @TokenInfo(DelegationTokenSelector.class)

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java

@@ -18,8 +18,12 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.StringUtils;
 
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DSQuotaExceededException extends QuotaExceededException {
   protected static final long serialVersionUID = 1L;
 

+ 8 - 0
src/java/org/apache/hadoop/hdfs/protocol/DataTransferProtocol.java

@@ -24,6 +24,8 @@ import java.io.DataOutputStream;
 import java.io.IOException;
 import java.io.OutputStream;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
@@ -33,6 +35,8 @@ import org.apache.hadoop.security.token.Token;
 /**
  * Transfer data to/from datanode using a streaming protocol.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public interface DataTransferProtocol {
   
   
@@ -214,6 +218,8 @@ public interface DataTransferProtocol {
 
 
   /** Sender */
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
   public static class Sender {
     /** Initialize a operation. */
     public static void op(DataOutputStream out, Op op) throws IOException {
@@ -461,6 +467,8 @@ public interface DataTransferProtocol {
   }
   
   /** reply **/
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
   public static class PipelineAck implements Writable {
     private long seqno;
     private Status replies[];
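
Note that the classification is applied to publicly visible nested classes as well, not only to top-level types: Sender and PipelineAck are tagged here in addition to DataTransferProtocol itself, just as DFSDataInputStream, Storage.StorageDirectory and BlockPlacementPolicy.NotEnoughReplicasException are elsewhere in this patch, since a nested class is not automatically covered by its enclosing type's annotation. A minimal sketch of the convention, using hypothetical names:

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

@InterfaceAudience.Private
@InterfaceStability.Evolving
public interface ExampleProtocol {        // hypothetical interface mirroring DataTransferProtocol

  /** Nested helper: annotated again, it does not inherit the enclosing classification. */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
  public static class ExampleSender {
  }
}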

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -22,6 +22,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.DeprecatedUTF8;
 import org.apache.hadoop.io.WritableComparable;
 
@@ -31,6 +33,8 @@ import org.apache.hadoop.io.WritableComparable;
  * which it currently represents.
  * 
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DatanodeID implements WritableComparable<DatanodeID> {
   public static final DatanodeID[] EMPTY_ARRAY = {}; 
 

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -22,6 +22,8 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.Date;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
@@ -40,6 +42,8 @@ import org.apache.avro.reflect.Nullable;
  * This object is used for communication in the
  * Datanode Protocol and the Client Protocol.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DatanodeInfo extends DatanodeID implements Node {
   protected long capacity;
   protected long dfsUsed;

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/DirectoryListing.java

@@ -20,6 +20,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -28,6 +30,8 @@ import org.apache.hadoop.io.WritableFactory;
  * This class defines a partial listing of a directory to support
  * iterative directory listing.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class DirectoryListing implements Writable {
   static {                                      // register a ctor
     WritableFactories.setFactory

+ 2 - 0
src/java/org/apache/hadoop/hdfs/protocol/FSConstants.java

@@ -17,12 +17,14 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 
 /************************************
  * Some handy constants
  *
  ************************************/
+@InterfaceAudience.Private
 public interface FSConstants {
   public static int MIN_BLOCKS_FOR_WRITE = 5;
 

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java

@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSUtil;
@@ -33,6 +35,8 @@ import org.apache.avro.reflect.Nullable;
 
 /** Interface that represents the over the wire information for a file.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class HdfsFileStatus implements Writable {
   static {                                      // register a ctor
     WritableFactories.setFactory

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/LocatedBlock.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 import org.apache.hadoop.io.*;
 import org.apache.hadoop.security.token.Token;
@@ -28,6 +30,8 @@ import java.io.*;
  * objects.  It tells where to find a Block.
  * 
  ****************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class LocatedBlock implements Writable {
 
   static {                                      // register a ctor

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java

@@ -25,6 +25,8 @@ import java.util.ArrayList;
 import java.util.Collections;
 import java.util.Comparator;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -34,6 +36,8 @@ import org.apache.avro.reflect.Nullable;
 /**
  * Collection of blocks with their locations and the file length.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class LocatedBlocks implements Writable {
   private long fileLength;
   private List<LocatedBlock> blocks; // array of blocks with prioritized locations

+ 5 - 0
src/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java

@@ -18,6 +18,11 @@
 
 package org.apache.hadoop.hdfs.protocol;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public final class NSQuotaExceededException extends QuotaExceededException {
   protected static final long serialVersionUID = 1L;
   

+ 5 - 0
src/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java

@@ -20,6 +20,9 @@ package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /** 
  * This exception is thrown when modification to HDFS results in violation
  * of a directory quota. A directory quota might be namespace quota (limit 
@@ -32,6 +35,8 @@ import java.io.IOException;
  *  DSQuotaExceededException or
  *  NSQuotaExceededException
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class QuotaExceededException extends IOException {
   protected static final long serialVersionUID = 1L;
   protected String pathName=null;

+ 5 - 0
src/java/org/apache/hadoop/hdfs/protocol/RecoveryInProgressException.java

@@ -19,9 +19,14 @@ package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Exception indicating that a replica is already being recovery.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class RecoveryInProgressException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 4 - 0
src/java/org/apache/hadoop/hdfs/protocol/UnregisteredNodeException.java

@@ -20,12 +20,16 @@ package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
 
 /**
  * This exception is thrown when a node that has not previously 
  * registered is trying to access the name node.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class UnregisteredNodeException extends IOException {
   private static final long serialVersionUID = -5620209396945970810L;
 

+ 5 - 0
src/java/org/apache/hadoop/hdfs/protocol/UnresolvedPathException.java

@@ -19,12 +19,17 @@
 package org.apache.hadoop.hdfs.protocol;
 
 import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.Path;
 
 /** 
  * Thrown when a symbolic link is encountered in a path.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public final class UnresolvedPathException extends UnresolvedLinkException {
   private static final long serialVersionUID = 1L;
   private String originalPath;  // The original path containing the link

+ 3 - 1
src/java/org/apache/hadoop/hdfs/security/token/block/BlockKey.java

@@ -20,11 +20,13 @@ package org.apache.hadoop.hdfs.security.token.block;
 
 import javax.crypto.SecretKey;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.security.token.delegation.DelegationKey;
 
 /**
  * Key used for generating and verifying block tokens
  */
+@InterfaceAudience.Private
 public class BlockKey extends DelegationKey {
 
   public BlockKey() {
@@ -34,4 +36,4 @@ public class BlockKey extends DelegationKey {
   public BlockKey(int keyId, long expiryDate, SecretKey key) {
     super(keyId, expiryDate, key);
   }
-}
+}

+ 2 - 0
src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java

@@ -23,12 +23,14 @@ import java.io.DataOutput;
 import java.io.IOException;
 import java.util.EnumSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenSecretManager.AccessMode;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.TokenIdentifier;
 
+@InterfaceAudience.Private
 public class BlockTokenIdentifier extends TokenIdentifier {
   static final Text KIND_NAME = new Text("HDFS_BLOCK_TOKEN");
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSecretManager.java

@@ -30,6 +30,7 @@ import java.util.Map;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.io.WritableUtils;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -43,6 +44,7 @@ import org.apache.hadoop.security.token.Token;
  * master and slave can generate and verify block tokens. Typically, master mode
  * is used by NN and slave mode is used by DN.
  */
+@InterfaceAudience.Private
 public class BlockTokenSecretManager extends
     SecretManager<BlockTokenIdentifier> {
   public static final Log LOG = LogFactory

+ 2 - 0
src/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.security.token.block;
 
 import java.util.Collection;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -27,6 +28,7 @@ import org.apache.hadoop.security.token.TokenSelector;
 /**
  * A block token selector for HDFS
  */
+@InterfaceAudience.Private
 public class BlockTokenSelector implements TokenSelector<BlockTokenIdentifier> {
 
   @SuppressWarnings("unchecked")

+ 3 - 1
src/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java

@@ -22,6 +22,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -29,6 +30,7 @@ import org.apache.hadoop.io.WritableFactory;
 /**
  * Object for passing block keys
  */
+@InterfaceAudience.Private
 public class ExportedBlockKeys implements Writable {
   public static final ExportedBlockKeys DUMMY_KEYS = new ExportedBlockKeys();
   private boolean isBlockTokenEnabled;
@@ -109,4 +111,4 @@ public class ExportedBlockKeys implements Writable {
     }
   }
 
-}
+}

+ 5 - 0
src/java/org/apache/hadoop/hdfs/security/token/block/InvalidBlockTokenException.java

@@ -20,9 +20,14 @@ package org.apache.hadoop.hdfs.security.token.block;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * Access token verification failed.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class InvalidBlockTokenException extends IOException {
   private static final long serialVersionUID = 168L;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -50,6 +50,7 @@ import java.util.concurrent.TimeUnit;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -184,6 +185,7 @@ import org.apache.hadoop.util.ToolRunner;
  * balancer is running.
  */
 
+@InterfaceAudience.Private
 public class Balancer implements Tool {
   private static final Log LOG = 
     LogFactory.getLog(Balancer.class.getName());

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/common/GenerationStamp.java

@@ -17,9 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.common;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /****************************************************************
  * A GenerationStamp is a Hadoop FS primitive, identified by a long.
  ****************************************************************/
+@InterfaceAudience.Private
 public class GenerationStamp implements Comparable<GenerationStamp> {
   /**
    * The first valid generation stamp.

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java

@@ -21,11 +21,14 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /************************************
  * Some handy internal HDFS constants
  *
  ************************************/
 
+@InterfaceAudience.Private
 public interface HdfsConstants {
   /**
    * Type of the node

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/common/InconsistentFSStateException.java

@@ -19,6 +19,9 @@ package org.apache.hadoop.hdfs.server.common;
 
 import java.io.File;
 import java.io.IOException;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -26,6 +29,8 @@ import org.apache.hadoop.util.StringUtils;
  * and is not recoverable. 
  * 
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class InconsistentFSStateException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 4 - 0
src/java/org/apache/hadoop/hdfs/server/common/IncorrectVersionException.java

@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 
 /**
@@ -26,6 +28,8 @@ import org.apache.hadoop.hdfs.protocol.FSConstants;
  * current version of the application.
  * 
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class IncorrectVersionException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 4 - 0
src/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -31,6 +31,7 @@ import java.util.Properties;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
@@ -57,6 +58,7 @@ import org.apache.hadoop.util.VersionInfo;
  * The locks are released when the servers stop (normally or abnormally).
  * 
  */
+@InterfaceAudience.Private
 public abstract class Storage extends StorageInfo {
   public static final Log LOG = LogFactory.getLog(Storage.class.getName());
 
@@ -104,6 +106,7 @@ public abstract class Storage extends StorageInfo {
    * Implementations can define a type for storage directory by implementing
    * this interface.
    */
+  @InterfaceAudience.Private
   public interface StorageDirType {
     public StorageDirType getStorageDirType();
     public boolean isOfType(StorageDirType type);
@@ -190,6 +193,7 @@ public abstract class Storage extends StorageInfo {
   /**
    * One of the storage directories.
    */
+  @InterfaceAudience.Private
   public class StorageDirectory {
     File              root; // root directory
     FileLock          lock; // storage lock

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java

@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Writable;
 
 
@@ -29,6 +30,7 @@ import org.apache.hadoop.io.Writable;
  * 
  * TODO namespaceID should be long and computed as hash(address + port)
  */
+@InterfaceAudience.Private
 public class StorageInfo implements Writable {
   public int   layoutVersion;   // layout version of the storage data
   public int   namespaceID;     // id of the file system

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/UpgradeManager.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.common;
 import java.io.IOException;
 import java.util.SortedSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
@@ -29,6 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
  * {@link #broadcastCommand} is the command that should be 
  *
  */
+@InterfaceAudience.Private
 public abstract class UpgradeManager {
   protected SortedSet<Upgradeable> currentUpgrades = null;
   protected boolean upgradeState = false; // true if upgrade is in progress

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection.UOSignature;
 
 /**
@@ -27,6 +28,7 @@ import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection.UOSignature;
  * Contains default implementation of common methods of {@link Upgradeable}
  * interface.
  */
+@InterfaceAudience.Private
 public abstract class UpgradeObject implements Upgradeable {
   protected short status;
   

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.util.SortedSet;
 import java.util.TreeSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.util.StringUtils;
 
@@ -29,6 +30,7 @@ import org.apache.hadoop.util.StringUtils;
  *
  * Upgrade objects should be registered here before they can be used. 
  */
+@InterfaceAudience.Private
 public class UpgradeObjectCollection {
   static {
     initialize();

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java

@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
@@ -31,6 +32,7 @@ import org.apache.hadoop.io.WritableFactory;
  * 
  * Describes status of current upgrade.
  */
+@InterfaceAudience.Private
 public class UpgradeStatusReport implements Writable {
   protected int version;
   protected short upgradeStatus;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/Upgradeable.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.common;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
 
 /**
@@ -29,6 +30,7 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
  * That is all components whose layout version is greater or equal to the
  * one returned by {@link #getVersion()} must be upgraded with this object.
  */
+@InterfaceAudience.Private
 public interface Upgradeable extends Comparable<Upgradeable> {
   /**
    * Get the layout version of the upgrade object.

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/common/Util.java

@@ -26,7 +26,9 @@ import java.util.Collection;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 
+@InterfaceAudience.Private
 public final class Util {
   private final static Log LOG = LogFactory.getLog(Util.class.getName());
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java

@@ -45,6 +45,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -954,6 +955,7 @@ class DataBlockScanner implements Runnable {
     }    
   }
   
+  @InterfaceAudience.Private
   public static class Servlet extends HttpServlet {
     private static final long serialVersionUID = 1L;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -48,6 +48,7 @@ import java.util.concurrent.atomic.AtomicInteger;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configured;
 import org.apache.hadoop.fs.FileSystem;
@@ -147,6 +148,7 @@ import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
  * information to clients or other DataNodes that might be interested.
  *
  **********************************************************/
+@InterfaceAudience.Private
 public class DataNode extends Configured 
     implements InterDatanodeProtocol, ClientDatanodeProtocol, FSConstants, Runnable {
   public static final Log LOG = LogFactory.getLog(DataNode.class);

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -31,6 +31,7 @@ import java.util.Properties;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil.HardLink;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -48,6 +49,7 @@ import org.apache.hadoop.util.Daemon;
  * <p>
  * @see Storage
  */
+@InterfaceAudience.Private
 public class DataStorage extends Storage {
   // Constants
   final static String BLOCK_SUBDIR_PREFIX = "subdir";

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java

@@ -25,13 +25,13 @@ import java.util.LinkedList;
 import java.util.Map;
 import java.util.Random;
 import java.util.concurrent.Callable;
-import java.util.concurrent.ExecutionException;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
 import java.util.concurrent.Future;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -43,6 +43,7 @@ import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
  * Reconciles the differences with block information maintained in
  * {@link FSDataset}
  */
+@InterfaceAudience.Private
 public class DirectoryScanner {
   private static final Log LOG = LogFactory.getLog(DirectoryScanner.class);
   private static final int DEFAULT_SCAN_INTERVAL = 21600;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -38,6 +38,7 @@ import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
 import javax.management.StandardMBean;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.DU;
@@ -68,6 +69,7 @@ import org.apache.hadoop.io.IOUtils;
  * has a unique name and an extent on disk.
  *
  ***************************************************/
+@InterfaceAudience.Private
 public class FSDataset implements FSConstants, FSDatasetInterface {
 
 

+ 2 - 3
src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java

@@ -24,9 +24,7 @@ import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
 
-
-
-
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
 import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
@@ -42,6 +40,7 @@ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
  * SimulatedFSDataset (which simulates data).
  *
  */
+@InterfaceAudience.Private
 public interface FSDatasetInterface extends FSDatasetMBean {
   
   

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/Replica.java

@@ -17,11 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 
 /** 
  * This represents block replicas which stored in DataNode.
  */
+@InterfaceAudience.Private
 public interface Replica {
   /** get block ID  */
   public long getBlockId();

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/ReplicaInfo.java

@@ -22,6 +22,7 @@ import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FileUtil.HardLink;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -32,6 +33,7 @@ import org.apache.hadoop.io.IOUtils;
  * This class is used by datanodes to maintain meta data of its replicas.
  * It provides a general interface for meta information of a replica.
  */
+@InterfaceAudience.Private
 abstract public class ReplicaInfo extends Block implements Replica {
   private FSVolume volume;      // volume where the replica belongs
   private File     dir;         // directory where block & meta files belong

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeObject;
@@ -31,6 +32,7 @@ import java.net.SocketTimeoutException;
  * Base class for data-node upgrade objects.
  * Data-node upgrades are run in separate threads.
  */
+@InterfaceAudience.Private
 public abstract class UpgradeObjectDatanode extends UpgradeObject implements Runnable {
   private DataNode dataNode = null;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeActivityMBean.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.datanode.metrics;
 import java.util.Random;
 
 import javax.management.ObjectName;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
 import org.apache.hadoop.metrics.util.MetricsRegistry;
@@ -53,6 +54,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry;
  * from the metrics registry passed as an argument to the constructor
  */
 
+@InterfaceAudience.Private
 public class DataNodeActivityMBean extends MetricsDynamicMBeanBase {
   final private ObjectName mbeanName;
   private Random rand = new Random(); 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.metrics.MetricsContext;
 import org.apache.hadoop.metrics.MetricsRecord;
@@ -43,6 +44,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
  *  <p> {@link #blocksRead}.inc()
  *
  */
+@InterfaceAudience.Private
 public class DataNodeMetrics implements Updater {
   private final MetricsRecord metricsRecord;
   private DataNodeActivityMBean datanodeActivityMBean;

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/metrics/FSDatasetMBean.java

@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.datanode.metrics;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * 
  * This Interface defines the methods to get the status of a the FSDataset of
@@ -34,6 +36,7 @@ import java.io.IOException;
  * @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean
  *
  */
+@InterfaceAudience.Private
 public interface FSDatasetMBean {
   
   /**

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -21,6 +21,7 @@ import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.net.SocketTimeoutException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations;
@@ -51,6 +52,7 @@ import org.apache.hadoop.net.NetUtils;
  * namespace image to local disk(s).</li>
  * </ol>
  */
+@InterfaceAudience.Private
 public class BackupNode extends NameNode {
   private static final String BN_ADDRESS_NAME_KEY = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
   private static final String BN_ADDRESS_DEFAULT = DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_DEFAULT;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/BackupStorage.java

@@ -24,6 +24,7 @@ import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
@@ -33,6 +34,7 @@ import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
 import org.apache.hadoop.io.LongWritable;
 
+@InterfaceAudience.Private
 public class BackupStorage extends FSImage {
   // Names of the journal spool directory and the spool file
   private static final String STORAGE_JSPOOL_DIR = "jspool";

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/BlockManager.java

@@ -32,6 +32,7 @@ import java.util.Random;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
@@ -48,6 +49,7 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
  * This class is a helper class for {@link FSNamesystem} and requires several
  * methods to be called with lock held on {@link FSNamesystem}.
  */
+@InterfaceAudience.Private
 public class BlockManager {
   // Default initial capacity and load factor of map
   public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16;

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicy.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -29,8 +30,10 @@ import java.util.*;
  * This interface is used for choosing the desired number of targets
  * for placing block replicas.
  */
+@InterfaceAudience.Private
 public abstract class BlockPlacementPolicy {
     
+  @InterfaceAudience.Private
   public static class NotEnoughReplicasException extends Exception {
     private static final long serialVersionUID = 1L;
     NotEnoughReplicasException(String msg) {

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/BlockPlacementPolicyDefault.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.hdfs.server.namenode;
 
 import org.apache.commons.logging.*;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -37,6 +38,7 @@ import java.util.*;
  * that is on a different rack. The 3rd replica is placed on a datanode
 * which is on a different node in the same rack as the second replica.
  */
+@InterfaceAudience.Private
 public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
   private boolean considerLoad; 
   private NetworkTopology clusterMap;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java

@@ -21,6 +21,7 @@ import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.StorageInfo;
 import org.apache.hadoop.hdfs.server.namenode.FSImage;
 import org.apache.hadoop.io.WritableComparable;
@@ -28,6 +29,7 @@ import org.apache.hadoop.io.WritableComparable;
 /**
  * A unique signature intended to identify checkpoint transactions.
  */
+@InterfaceAudience.Private
 public class CheckpointSignature extends StorageInfo 
                       implements WritableComparable<CheckpointSignature> {
   private static final String FIELD_SEPARATOR = ":";

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/ContentSummaryServlet.java

@@ -25,15 +25,16 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.znerd.xmlenc.XMLOutputter;
 
 /** Servlet for retrieving the content summary of a path */
+@InterfaceAudience.Private
 public class ContentSummaryServlet extends DfsServlet {
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/CorruptReplicasMap.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.ipc.Server;
 
@@ -31,6 +32,7 @@ import java.util.*;
  * Mapping: Block -> TreeSet<DatanodeDescriptor> 
  */
 
+@InterfaceAudience.Private
 public class CorruptReplicasMap{
 
   private SortedMap<Block, Collection<DatanodeDescriptor>> corruptReplicasMap =

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/DatanodeDescriptor.java

@@ -21,6 +21,8 @@ import java.io.DataInput;
 import java.io.IOException;
 import java.util.*;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -46,6 +48,7 @@ import org.apache.hadoop.io.WritableUtils;
  * fsImage.
 
  **************************************************/
+@InterfaceAudience.Private
 public class DatanodeDescriptor extends DatanodeInfo {
   
   // Stores status of decommissioning.
@@ -53,6 +56,8 @@ public class DatanodeDescriptor extends DatanodeInfo {
   DecommissioningStatus decommissioningStatus = new DecommissioningStatus();
   
   /** Block and targets pair */
+  @InterfaceAudience.Private
+  @InterfaceStability.Evolving
   public static class BlockTargetPair {
     public final Block block;
     public final DatanodeDescriptor[] targets;    

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/DelegationTokenServlet.java

@@ -27,6 +27,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.io.Text;
@@ -37,6 +38,7 @@ import org.apache.hadoop.security.token.Token;
 /**
  * Serve delegation tokens over http for use in hftp.
  */
+@InterfaceAudience.Private
 @SuppressWarnings("serial")
 public class DelegationTokenServlet extends DfsServlet {
   private static final Log LOG = LogFactory.getLog(DelegationTokenServlet.class);

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/FSClusterStats.java

@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /** 
  * This interface is used for retrieving the load related statistics of 
  * the cluster.
  */
+@InterfaceAudience.Private
 public interface FSClusterStats {
 
   /**

+ 4 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -26,6 +26,8 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Iterator;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.Options.Rename;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -59,6 +61,8 @@ import org.apache.hadoop.security.token.delegation.DelegationKey;
  * FSEditLog maintains a log of the namespace modifications.
  * 
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class FSEditLog {
   public  static final byte OP_INVALID = -1;
   private static final byte OP_ADD = 0;

+ 4 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -43,6 +43,8 @@ import java.util.Properties;
 import java.util.Random;
 import java.util.Set;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
@@ -76,6 +78,8 @@ import org.apache.hadoop.hdfs.DFSConfigKeys;
  * FSImage handles checkpointing and logging of the namespace edits.
  * 
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class FSImage extends Storage {
 
   private static final SimpleDateFormat DATE_FORM =

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/FSInodeInfo.java

@@ -17,10 +17,13 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /** 
 * This interface is used by the pluggable block placement policy
  * to expose a few characteristics of an Inode.
  */
+@InterfaceAudience.Private
 public interface FSInodeInfo {
 
   /**

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import org.apache.commons.logging.*;
 
 import org.apache.hadoop.HadoopIllegalArgumentException;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.*;
@@ -109,6 +110,7 @@ import javax.management.StandardMBean;
  * 4)  machine --> blocklist (inverted #2)
  * 5)  LRU cache of updated-heartbeat machines
  ***************************************************/
+@InterfaceAudience.Private
 public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterStats {
   public static final Log LOG = LogFactory.getLog(FSNamesystem.class);
 

+ 4 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -29,13 +29,13 @@ import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -45,8 +45,10 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.znerd.xmlenc.XMLOutputter;
 
 /** Servlets for file checksum */
+@InterfaceAudience.Private
 public class FileChecksumServlets {
   /** Redirect file checksum queries to an appropriate datanode. */
+  @InterfaceAudience.Private
   public static class RedirectServlet extends DfsServlet {
     /** For java.io.Serializable */
     private static final long serialVersionUID = 1L;
@@ -74,6 +76,7 @@ public class FileChecksumServlets {
   }
   
   /** Get FileChecksum */
+  @InterfaceAudience.Private
   public static class GetServlet extends DfsServlet {
     /** For java.io.Serializable */
     private static final long serialVersionUID = 1L;

+ 2 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -25,8 +25,8 @@ import java.security.PrivilegedExceptionAction;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
@@ -39,6 +39,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 /** Redirect queries about the hosted filesystem to an appropriate datanode.
  * @see org.apache.hadoop.hdfs.HftpFileSystem
  */
+@InterfaceAudience.Private
 public class FileDataServlet extends DfsServlet {
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;

+ 2 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

@@ -26,15 +26,15 @@ import javax.servlet.ServletContext;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /**
  * This class is used in Namesystem's web server to do fsck on namenode.
  */
+@InterfaceAudience.Private
 public class FsckServlet extends DfsServlet {
   /** for java.io.Serializable */
   private static final long serialVersionUID = 1L;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -25,6 +25,7 @@ import javax.servlet.http.HttpServlet;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.util.StringUtils;
 
 /**
@@ -32,6 +33,7 @@ import org.apache.hadoop.util.StringUtils;
  * Typically used by the Secondary NameNode to retrieve image and
  * edit file for periodic checkpointing.
  */
+@InterfaceAudience.Private
 public class GetImageServlet extends HttpServlet {
   private static final long serialVersionUID = -7669068179452648952L;
 

+ 3 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -26,6 +27,7 @@ import org.apache.hadoop.hdfs.protocol.Block;
 /**
  * An INode representing a symbolic link.
  */
+@InterfaceAudience.Private
 public class INodeSymlink extends INode {
   private byte[] symlink; // The target URI
 
@@ -75,4 +77,4 @@ public class INodeSymlink extends INode {
   public boolean isDirectory() {
     return false;
   }
-}
+}

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/LeaseExpiredException.java

@@ -20,9 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * The lease that was being used to create this file has expired.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class LeaseExpiredException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -29,6 +29,7 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
@@ -56,6 +57,7 @@ import static org.apache.hadoop.hdfs.server.common.Util.now;
  *      and removes the lease once all files have been removed
  * 2.10) Namenode commit changes to edit log
  */
+@InterfaceAudience.Private
 public class LeaseManager {
   public static final Log LOG = LogFactory.getLog(LeaseManager.class);
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.HftpFileSystem;
@@ -45,6 +46,7 @@ import javax.servlet.http.HttpServletResponse;
  * Obtain meta-information about a filesystem.
  * @see org.apache.hadoop.hdfs.HftpFileSystem
  */
+@InterfaceAudience.Private
 public class ListPathsServlet extends DfsServlet {
   /** For java.io.Serializable */
   private static final long serialVersionUID = 1L;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -28,6 +28,7 @@ import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -129,6 +130,7 @@ import org.apache.hadoop.util.StringUtils;
  * secondary namenodes or rebalancing processes to get partial namenode's
  * state, for example partial blocksMap etc.
  **********************************************************/
+@InterfaceAudience.Private
 public class NameNode implements NamenodeProtocols, FSConstants {
   static{
     Configuration.addDefaultResource("hdfs-default.xml");

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -31,6 +31,7 @@ import java.util.TreeSet;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
@@ -70,6 +71,7 @@ import org.apache.hadoop.security.AccessControlException;
  *  optionally can print detailed statistics on block locations and replication
  *  factors of each file.
  */
+@InterfaceAudience.Private
 public class NamenodeFsck {
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/NotReplicatedYetException.java

@@ -20,9 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * The file has not finished being written to enough datanodes yet.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class NotReplicatedYetException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java

@@ -20,11 +20,16 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * This exception is thrown when the name node is in safe mode.
 * Clients cannot modify the namespace until safe mode is off. 
  * 
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class SafeModeException extends IOException {
   private static final long serialVersionUID = 1L;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -28,6 +28,7 @@ import java.util.Iterator;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.hdfs.DFSUtil.ErrorSimulator;
@@ -63,6 +64,7 @@ import org.apache.hadoop.util.StringUtils;
  *
  **********************************************************/
 @Deprecated // use BackupNode with -checkpoint argument instead.
+@InterfaceAudience.Private
 public class SecondaryNameNode implements Runnable {
     
   public static final Log LOG = 

+ 3 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java

@@ -27,15 +27,17 @@ import java.util.List;
 import javax.servlet.ServletException;
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
+
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.mortbay.jetty.InclusiveByteRange;
 
+@InterfaceAudience.Private
 public class StreamFile extends DfsServlet {
   /** for java.io.Serializable */
   private static final long serialVersionUID = 1L;

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/UnsupportedActionException.java

@@ -20,9 +20,14 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
 /**
  * This exception is thrown when an operation is not supported.
  */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class UnsupportedActionException extends IOException {
   /** for java.io.Serializable */
   private static final long serialVersionUID = 1L;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeObject;
 import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
@@ -27,6 +28,7 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
  * Base class for name-node upgrade objects.
  * Data-node upgrades are run in separate threads.
  */
+@InterfaceAudience.Private
 public abstract class UpgradeObjectNamenode extends UpgradeObject {
 
   /**

+ 3 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMBean.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.hdfs.server.namenode.metrics;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+
 /**
  * 
 * This interface defines the methods to get the status of the FSNamesystem of
@@ -33,6 +35,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics;
  * @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean
  *
  */
+@InterfaceAudience.Private
 public interface FSNamesystemMBean {
 
   /**

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/metrics/FSNamesystemMetrics.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -42,6 +43,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingInt;
  *  <p> {@link #filesTotal}.set()
  *
  */
+@InterfaceAudience.Private
 public class FSNamesystemMetrics implements Updater {
   private static Log log = LogFactory.getLog(FSNamesystemMetrics.class);
   private final MetricsRecord metricsRecord;

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeActivityMBean.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 import javax.management.ObjectName;
 
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.metrics.util.MBeanUtil;
 import org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase;
 import org.apache.hadoop.metrics.util.MetricsRegistry;
@@ -52,6 +53,7 @@ import org.apache.hadoop.metrics.util.MetricsRegistry;
  * from the metrics registry passed as an argument to the constructor
  */
 
+@InterfaceAudience.Private
 public class NameNodeActivityMBean extends MetricsDynamicMBeanBase {
   final private ObjectName mbeanName;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/metrics/NameNodeMetrics.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode.metrics;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -42,6 +43,7 @@ import org.apache.hadoop.metrics.util.MetricsTimeVaryingRate;
  *  <p> {@link #syncs}.inc()
  *
  */
+@InterfaceAudience.Private
 public class NameNodeMetrics implements Updater {
     private static Log log = LogFactory.getLog(NameNodeMetrics.class);
     private final MetricsRecord metricsRecord;

+ 4 - 0
src/java/org/apache/hadoop/hdfs/server/protocol/BlockCommand.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.server.protocol;
 import java.io.*;
 import java.util.List;
 
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair;
@@ -34,6 +36,8 @@ import org.apache.hadoop.io.*;
  * another DataNode.
  * 
  ****************************************************/
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
 public class BlockCommand extends DatanodeCommand {
   Block blocks[];
   DatanodeInfo targets[][];

Some files were not shown because too many files changed in this diff
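
Every hunk above follows the same pattern: the org.apache.hadoop.classification annotations are imported, the class or interface is marked @InterfaceAudience.Private, and @InterfaceStability.Evolving is added where the API is still expected to change. The sketch below is not part of the commit; the class name ExampleHdfsHelper is hypothetical and only illustrates what an annotated HDFS-internal class looks like after this change.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.hadoop.classification.InterfaceAudience;
import org.apache.hadoop.classification.InterfaceStability;

/**
 * Illustrative example of the classification pattern applied in this commit:
 * the class is internal to Hadoop and its API may still change.
 */
@InterfaceAudience.Private      // not part of the public HDFS API
@InterfaceStability.Evolving    // may change incompatibly between releases
public class ExampleHdfsHelper {

  /** Nested public types are annotated separately (cf. DatanodeDescriptor.BlockTargetPair). */
  @InterfaceAudience.Private
  @InterfaceStability.Evolving
  public static class ExampleNestedType {
  }
}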