
HDFS-3659. Add missing @Override to methods across the hadoop-hdfs project. Contributed by Brandon Li. (harsh)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1361894 13f79535-47bb-0310-9956-ffa450edef68
Harsh J committed 13 years ago
Commit 0e8e499ff4
100 changed files with 215 additions and 0 deletions
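
For context on what the annotation buys: @Override makes javac verify that a method really overrides (or, since Java 6, implements) a supertype method, so a silent signature mismatch becomes a compile error instead of a latent bug. A minimal, hypothetical sketch of the failure mode (none of these names come from hadoop-hdfs):

    import java.util.HashSet;
    import java.util.Set;

    public class OverrideDemo {
      static class Block {
        final long id;
        Block(long id) { this.id = id; }

        // BUG: this overloads equals rather than overriding equals(Object),
        // so HashSet/HashMap lookups never call it. Writing @Override here
        // would not compile, pointing straight at the mistake.
        public boolean equals(Block other) {
          return other != null && other.id == id;
        }

        // The real override, which @Override accepts:
        @Override
        public boolean equals(Object obj) {
          return obj instanceof Block && ((Block) obj).id == id;
        }

        @Override
        public int hashCode() { return (int) (id ^ (id >>> 32)); }
      }

      public static void main(String[] args) {
        Set<Block> set = new HashSet<Block>();
        set.add(new Block(42));
        // Prints true; without the equals(Object) override it would print
        // false even though a block with the same id is in the set.
        System.out.println(set.contains(new Block(42)));
      }
    }
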
  1. 3 0
      hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt
  2. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java
  3. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  4. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java
  5. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java
  6. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java
  7. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java
  8. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java
  9. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java
  10. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java
  11. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java
  12. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java
  13. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java
  14. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java
  15. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java
  16. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  17. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java
  18. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java
  19. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java
  20. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java
  21. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  22. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
  23. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java
  24. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  25. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java
  26. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java
  27. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  28. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  29. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java
  30. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
  31. 6 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java
  32. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java
  33. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java
  34. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java
  35. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  36. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java
  37. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java
  38. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java
  39. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java
  40. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  41. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java
  42. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java
  43. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java
  44. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
  45. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java
  46. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java
  47. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java
  48. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java
  49. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java
  50. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java
  51. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
  52. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java
  53. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java
  54. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java
  55. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java
  56. 10 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java
  57. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  58. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java
  59. 5 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  60. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
  61. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java
  62. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
  63. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java
  64. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java
  65. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java
  66. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java
  67. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java
  68. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java
  69. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java
  70. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java
  71. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java
  72. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
  73. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java
  74. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  75. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java
  76. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  77. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
  78. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java
  79. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java
  80. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java
  81. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java
  82. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java
  83. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java
  84. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java
  85. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java
  86. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java
  87. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java
  88. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java
  89. 8 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java
  90. 7 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java
  91. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java
  92. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java
  93. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
  94. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java
  95. 4 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java
  96. 3 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java
  97. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
  98. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java
  99. 2 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java
  100. 1 0
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -330,6 +330,9 @@ Branch-2 ( Unreleased changes )
     HDFS-3663. MiniDFSCluster should capture the code path that led to
     the first ExitException. (eli)
 
+    HDFS-3659. Add missing @Override to methods across the hadoop-hdfs
+    project. (Brandon Li via harsh)
+
   OPTIMIZATIONS
 
     HDFS-2982. Startup performance suffers when there are many edit log

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -597,6 +597,7 @@ public class DFSClient implements java.io.Closeable {
    * Close the file system, abandoning all of the leases and files being
    * created and close connections to the namenode.
    */
+  @Override
   public synchronized void close() throws IOException {
     if(clientRunning) {
       closeAllFilesBeingWritten(false);

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -268,6 +268,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
       return seqno == HEART_BEAT_SEQNO;
     }
     
+    @Override
     public String toString() {
       return "packet seqno:" + this.seqno +
       " offsetInBlock:" + this.offsetInBlock + 
@@ -396,6 +397,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
      * streamer thread is the only thread that opens streams to datanode, 
      * and closes them. Any error recovery is also done by this thread.
      */
+    @Override
     public void run() {
       long lastPacket = Time.now();
       while (!streamerClosed && dfsClient.clientRunning) {
@@ -654,6 +656,7 @@ public class DFSOutputStream extends FSOutputSummer implements Syncable {
         this.targets = targets;
       }
 
+      @Override
       public void run() {
 
         setName("ResponseProcessor for block " + block);

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -110,6 +110,7 @@ public class DFSUtil {
    * Address matcher for matching an address to local address
    */
   static final AddressMatcher LOCAL_ADDRESS_MATCHER = new AddressMatcher() {
+    @Override
     public boolean match(InetSocketAddress s) {
       return NetUtils.isLocalAddress(s.getAddress());
     };

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -113,6 +113,7 @@ public class HftpFileSystem extends FileSystem
 
   protected static final ThreadLocal<SimpleDateFormat> df =
     new ThreadLocal<SimpleDateFormat>() {
+    @Override
     protected SimpleDateFormat initialValue() {
       return getDateFormat();
     }
@@ -240,6 +241,7 @@ public class HftpFileSystem extends FileSystem
       //Renew TGT if needed
       ugi.reloginFromKeytab();
       return ugi.doAs(new PrivilegedExceptionAction<Token<?>>() {
+        @Override
         public Token<?> run() throws IOException {
           final String nnHttpUrl = nnSecureUri.toString();
           Credentials c;
@@ -402,6 +404,7 @@ public class HftpFileSystem extends FileSystem
 
     ArrayList<FileStatus> fslist = new ArrayList<FileStatus>();
 
+    @Override
     public void startElement(String ns, String localname, String qname,
                 Attributes attrs) throws SAXException {
       if ("listing".equals(qname)) return;
@@ -541,6 +544,7 @@ public class HftpFileSystem extends FileSystem
   public void setWorkingDirectory(Path f) { }
 
   /** This optional operation is not yet supported. */
+  @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     throw new IOException("Not supported");
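
The first HftpFileSystem hunk above shows where @Override earns its keep in anonymous subclasses: ThreadLocal.initialValue() is a protected hook, and a mistyped signature would compile as an unrelated method that is never called, leaving each thread with the default null value. A hedged sketch of the idiom, with getDateFormat() standing in for the class's own helper:

    import java.text.SimpleDateFormat;
    import java.util.Date;

    public class DateFormatHolder {
      // SimpleDateFormat is not thread-safe, hence one instance per thread.
      private static final ThreadLocal<SimpleDateFormat> df =
          new ThreadLocal<SimpleDateFormat>() {
            @Override
            protected SimpleDateFormat initialValue() {
              return getDateFormat();
            }
          };

      // Stand-in for HftpFileSystem.getDateFormat(); the format is assumed.
      static SimpleDateFormat getDateFormat() {
        return new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssZ");
      }

      public static void main(String[] args) {
        System.out.println(df.get().format(new Date()));
      }
    }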

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/HsftpFileSystem.java

@@ -189,6 +189,7 @@ public class HsftpFileSystem extends HftpFileSystem {
    * Dummy hostname verifier that is used to bypass hostname checking
    */
   protected static class DummyHostnameVerifier implements HostnameVerifier {
+    @Override
     public boolean verify(String hostname, SSLSession session) {
       return true;
     }
@@ -198,12 +199,15 @@ public class HsftpFileSystem extends HftpFileSystem {
    * Dummy trustmanager that is used to trust all server certificates
    */
   protected static class DummyTrustManager implements X509TrustManager {
+    @Override
     public void checkClientTrusted(X509Certificate[] chain, String authType) {
     }
 
+    @Override
     public void checkServerTrusted(X509Certificate[] chain, String authType) {
     }
 
+    @Override
     public X509Certificate[] getAcceptedIssuers() {
       return null;
     }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/SocketCache.java

@@ -134,6 +134,7 @@ class SocketCache {
     multimap.clear();
   }
 
+  @Override
   protected void finalize() {
     clear();
   }

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/Block.java

@@ -40,6 +40,7 @@ public class Block implements Writable, Comparable<Block> {
     WritableFactories.setFactory
       (Block.class,
        new WritableFactory() {
+         @Override
          public Writable newInstance() { return new Block(); }
        });
   }
@@ -146,6 +147,7 @@ public class Block implements Writable, Comparable<Block> {
 
   /**
    */
+  @Override
   public String toString() {
     return getBlockName() + "_" + getGenerationStamp();
   }

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/BlockListAsLongs.java

@@ -148,10 +148,12 @@ public class BlockListAsLongs implements Iterable<Block> {
       this.currentReplicaState = null;
     }
 
+    @Override
     public boolean hasNext() {
       return currentBlockIndex < getNumberOfBlocks();
     }
 
+    @Override
     public Block next() {
       block.set(blockId(currentBlockIndex),
                 blockLength(currentBlockIndex),
@@ -161,6 +163,7 @@ public class BlockListAsLongs implements Iterable<Block> {
       return block;
     }
 
+    @Override
     public void remove() {
       throw new UnsupportedOperationException("Sorry. can't remove.");
     }
@@ -178,6 +181,7 @@ public class BlockListAsLongs implements Iterable<Block> {
   /**
    * Returns an iterator over blocks in the block report. 
    */
+  @Override
   public Iterator<Block> iterator() {
     return getBlockReportIterator();
   }
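
Many hunks in this patch, like the BlockListAsLongs one above, annotate methods that implement an interface (Iterator, Runnable, Comparator) rather than override a superclass method. javac accepts @Override in that position only since Java 6, which is plausibly why these annotations were missing in older code. A small sketch of the pattern; CountingIterable is an invented stand-in, not an HDFS class:

    import java.util.Iterator;
    import java.util.NoSuchElementException;

    class CountingIterable implements Iterable<Long> {
      private final long count;
      CountingIterable(long count) { this.count = count; }

      @Override
      public Iterator<Long> iterator() {
        return new Iterator<Long>() {
          private long next = 0;

          @Override // implements an interface method: legal since Java 6
          public boolean hasNext() { return next < count; }

          @Override
          public Long next() {
            if (!hasNext()) throw new NoSuchElementException();
            return next++;
          }

          @Override // mirrors the "Sorry. can't remove." style above
          public void remove() {
            throw new UnsupportedOperationException("Sorry. can't remove.");
          }
        };
      }
    }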

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java

@@ -37,6 +37,7 @@ public class DSQuotaExceededException extends QuotaExceededException {
     super(quota, count);
   }
 
+  @Override
   public String getMessage() {
     String msg = super.getMessage();
     if (msg == null) {

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeID.java

@@ -150,6 +150,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
     return ipcPort;
   }
 
+  @Override
   public boolean equals(Object to) {
     if (this == to) {
       return true;
@@ -161,10 +162,12 @@ public class DatanodeID implements Comparable<DatanodeID> {
             storageID.equals(((DatanodeID)to).getStorageID()));
   }
   
+  @Override
   public int hashCode() {
     return getXferAddr().hashCode()^ storageID.hashCode();
   }
   
+  @Override
   public String toString() {
     return getXferAddr();
   }
@@ -187,6 +190,7 @@ public class DatanodeID implements Comparable<DatanodeID> {
    * @param that
    * @return as specified by Comparable
    */
+  @Override
   public int compareTo(DatanodeID that) {
     return getXferAddr().compareTo(that.getXferAddr());
   }

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -56,6 +56,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
       this.value = v;
     }
 
+    @Override
     public String toString() {
       return value;
     }
@@ -126,6 +127,7 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
   
   /** Network location name */
+  @Override
   public String getName() {
     return getXferAddr();
   }
@@ -200,9 +202,11 @@ public class DatanodeInfo extends DatanodeID implements Node {
   }
 
   /** network location */
+  @Override
   public synchronized String getNetworkLocation() {return location;}
     
   /** Sets the network location */
+  @Override
   public synchronized void setNetworkLocation(String location) {
     this.location = NodeBase.normalize(location);
   }
@@ -334,13 +338,17 @@ public class DatanodeInfo extends DatanodeID implements Node {
   private transient Node parent; //its parent
 
   /** Return this node's parent */
+  @Override
   public Node getParent() { return parent; }
+  @Override
   public void setParent(Node parent) {this.parent = parent;}
    
   /** Return this node's level in the tree.
    * E.g. the root of a tree returns 0 and its children return 1
    */
+  @Override
   public int getLevel() { return level; }
+  @Override
   public void setLevel(int level) {this.level = level;}
 
   @Override

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java

@@ -113,6 +113,7 @@ public class LocatedBlocks {
     Comparator<LocatedBlock> comp = 
       new Comparator<LocatedBlock>() {
         // Returns 0 iff a is inside b or b is inside a
+        @Override
         public int compare(LocatedBlock a, LocatedBlock b) {
           long aBeg = a.getStartOffset();
           long bBeg = b.getStartOffset();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java

@@ -36,6 +36,7 @@ public final class NSQuotaExceededException extends QuotaExceededException {
     super(quota, count);
   }
 
+  @Override
   public String getMessage() {
     String msg = super.getMessage();
     if (msg == null) {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java

@@ -58,6 +58,7 @@ public class QuotaExceededException extends IOException {
     this.pathName = path;
   }
   
+  @Override
   public String getMessage() {
     return super.getMessage();
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -131,6 +131,7 @@ public class ClientNamenodeProtocolTranslatorPB implements
     rpcProxy = proxy;
   }
   
+  @Override
   public void close() {
     RPC.stopProxy(rpcProxy);
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/NamenodeProtocolTranslatorPB.java

@@ -82,6 +82,7 @@ public class NamenodeProtocolTranslatorPB implements NamenodeProtocol,
     this.rpcProxy = rpcProxy;
   }
 
+  @Override
   public void close() {
     RPC.stopProxy(rpcProxy);
   }

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenIdentifier.java

@@ -119,6 +119,7 @@ public class BlockTokenIdentifier extends TokenIdentifier {
   }
 
   /** {@inheritDoc} */
+  @Override
   public boolean equals(Object obj) {
     if (obj == this) {
       return true;
@@ -135,12 +136,14 @@ public class BlockTokenIdentifier extends TokenIdentifier {
   }
 
   /** {@inheritDoc} */
+  @Override
   public int hashCode() {
     return (int) expiryDate ^ keyId ^ (int) blockId ^ modes.hashCode()
         ^ (userId == null ? 0 : userId.hashCode())
         ^ (blockPoolId == null ? 0 : blockPoolId.hashCode());
   }
 
+  @Override
   public void readFields(DataInput in) throws IOException {
     this.cache = null;
     expiryDate = WritableUtils.readVLong(in);
@@ -155,6 +158,7 @@ public class BlockTokenIdentifier extends TokenIdentifier {
     }
   }
 
+  @Override
   public void write(DataOutput out) throws IOException {
     WritableUtils.writeVLong(out, expiryDate);
     WritableUtils.writeVInt(out, keyId);

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/BlockTokenSelector.java

@@ -31,6 +31,7 @@ import org.apache.hadoop.security.token.TokenSelector;
 @InterfaceAudience.Private
 public class BlockTokenSelector implements TokenSelector<BlockTokenIdentifier> {
 
+  @Override
   @SuppressWarnings("unchecked")
   public Token<BlockTokenIdentifier> selectToken(Text service,
       Collection<Token<? extends TokenIdentifier>> tokens) {

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/security/token/block/ExportedBlockKeys.java

@@ -78,6 +78,7 @@ public class ExportedBlockKeys implements Writable {
   static { // register a ctor
     WritableFactories.setFactory(ExportedBlockKeys.class,
         new WritableFactory() {
+          @Override
           public Writable newInstance() {
             return new ExportedBlockKeys();
           }
@@ -86,6 +87,7 @@ public class ExportedBlockKeys implements Writable {
 
   /**
    */
+  @Override
   public void write(DataOutput out) throws IOException {
     out.writeBoolean(isBlockTokenEnabled);
     out.writeLong(keyUpdateInterval);
@@ -99,6 +101,7 @@ public class ExportedBlockKeys implements Writable {
 
   /**
    */
+  @Override
   public void readFields(DataInput in) throws IOException {
     isBlockTokenEnabled = in.readBoolean();
     keyUpdateInterval = in.readLong();

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -378,6 +378,7 @@ public class Balancer {
     /* start a thread to dispatch the block move */
     private void scheduleBlockMove() {
       moverExecutor.execute(new Runnable() {
+        @Override
         public void run() {
           if (LOG.isDebugEnabled()) {
             LOG.debug("Starting moving "+ block.getBlockId() +
@@ -570,6 +571,7 @@ public class Balancer {
     /* A thread that initiates a block move 
      * and waits for block move to complete */
     private class BlockMoveDispatcher implements Runnable {
+      @Override
       public void run() {
         dispatchBlocks();
       }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java

@@ -189,6 +189,7 @@ class NameNodeConnector {
    * Periodically updates access keys.
    */
   class BlockKeyUpdater implements Runnable {
+    @Override
     public void run() {
       try {
         while (shouldRun) {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyWithNodeGroup.java

@@ -52,6 +52,7 @@ public class BlockPlacementPolicyWithNodeGroup extends BlockPlacementPolicyDefau
   BlockPlacementPolicyWithNodeGroup() {
   }
 
+  @Override
   public void initialize(Configuration conf,  FSClusterStats stats,
           NetworkTopology clusterMap) {
     super.initialize(conf, stats, clusterMap);

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -37,15 +37,18 @@ class BlocksMap {
       this.blockInfo = blkInfo;
     }
 
+    @Override
     public boolean hasNext() {
       return blockInfo != null && nextIdx < blockInfo.getCapacity()
               && blockInfo.getDatanode(nextIdx) != null;
     }
 
+    @Override
     public DatanodeDescriptor next() {
       return blockInfo.getDatanode(nextIdx++);
     }
 
+    @Override
     public void remove()  {
       throw new UnsupportedOperationException("Sorry. can't remove.");
     }

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeDescriptor.java

@@ -326,16 +326,19 @@ public class DatanodeDescriptor extends DatanodeInfo {
       this.node = dn;
     }
 
+    @Override
     public boolean hasNext() {
       return current != null;
     }
 
+    @Override
     public BlockInfo next() {
       BlockInfo res = current;
       current = current.getNext(current.findDatanode(node));
       return res;
     }
 
+    @Override
     public void remove()  {
       throw new UnsupportedOperationException("Sorry. can't remove.");
     }
@@ -542,6 +545,7 @@ public class DatanodeDescriptor extends DatanodeInfo {
   /**
    * @param nodeReg DatanodeID to update registration for.
    */
+  @Override
   public void updateRegInfo(DatanodeID nodeReg) {
     super.updateRegInfo(nodeReg);
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/PendingReplicationBlocks.java

@@ -192,6 +192,7 @@ class PendingReplicationBlocks {
    * their replication request.
    */
   class PendingReplicationMonitor implements Runnable {
+    @Override
     public void run() {
       while (fsRunning) {
         long period = Math.min(defaultRecheckInterval, timeout);

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -141,6 +141,7 @@ public final class HdfsServerConstants {
     private String description = null;
     private NamenodeRole(String arg) {this.description = arg;}
   
+    @Override
     public String toString() {
       return description;
     }

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -109,6 +109,7 @@ public class JspHelper {
   // compare two records based on their frequency
   private static class NodeRecordComparator implements Comparator<NodeRecord> {
 
+    @Override
     public int compare(NodeRecord o1, NodeRecord o2) {
       if (o1.frequency < o2.frequency) {
         return -1;
@@ -312,6 +313,7 @@ public class JspHelper {
         }
       }
 
+      @Override
       public int compare(DatanodeDescriptor d1,
                          DatanodeDescriptor d2) {
         int ret = 0;

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/Storage.java

@@ -123,6 +123,7 @@ public abstract class Storage extends StorageInfo {
       this.prevIndex = 0;
     }
     
+    @Override
     public boolean hasNext() {
       if (storageDirs.isEmpty() || nextIndex >= storageDirs.size())
         return false;
@@ -138,6 +139,7 @@ public abstract class Storage extends StorageInfo {
       return true;
     }
     
+    @Override
     public StorageDirectory next() {
       StorageDirectory sd = getStorageDir(nextIndex);
       prevIndex = nextIndex;
@@ -152,6 +154,7 @@ public abstract class Storage extends StorageInfo {
       return sd;
     }
     
+    @Override
     public void remove() {
       nextIndex = prevIndex; // restore previous state
       storageDirs.remove(prevIndex); // remove last returned element

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java

@@ -78,6 +78,7 @@ public class StorageInfo {
     cTime = from.cTime;
   }
   
+  @Override
   public String toString() {
     StringBuilder sb = new StringBuilder();
     sb.append("lv=").append(layoutVersion).append(";cid=").append(clusterID)

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObject.java

@@ -32,19 +32,23 @@ import org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection.UOSignature;
 public abstract class UpgradeObject implements Upgradeable {
   protected short status;
   
+  @Override
   public short getUpgradeStatus() {
     return status;
   }
 
+  @Override
   public String getDescription() {
     return "Upgrade object for " + getType() + " layout version " + getVersion();
   }
 
+  @Override
   public UpgradeStatusReport getUpgradeStatusReport(boolean details) 
                                                     throws IOException {
     return new UpgradeStatusReport(getVersion(), getUpgradeStatus(), false);
   }
 
+  @Override
   public int compareTo(Upgradeable o) {
     if(this.getVersion() != o.getVersion())
       return (getVersion() > o.getVersion() ? -1 : 1);
@@ -55,6 +59,7 @@ public abstract class UpgradeObject implements Upgradeable {
                     o.getClass().getCanonicalName());
   }
 
+  @Override
   public boolean equals(Object o) {
     if (!(o instanceof UpgradeObject)) {
       return false;
@@ -62,6 +67,7 @@ public abstract class UpgradeObject implements Upgradeable {
     return this.compareTo((UpgradeObject)o) == 0;
   }
 
+  @Override
   public int hashCode() {
     return new UOSignature(this).hashCode(); 
   }

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeObjectCollection.java

@@ -73,6 +73,7 @@ public class UpgradeObjectCollection {
       }
     }
 
+    @Override
     public int compareTo(UOSignature o) {
       if(this.version != o.version)
         return (version < o.version ? -1 : 1);
@@ -82,6 +83,7 @@ public class UpgradeObjectCollection {
       return className.compareTo(o.className);
     }
 
+    @Override
     public boolean equals(Object o) {
         if (!(o instanceof UOSignature)) {
           return false;
@@ -89,6 +91,7 @@ public class UpgradeObjectCollection {
         return this.compareTo((UOSignature)o) == 0;
       }
 
+      @Override
       public int hashCode() {
         return version ^ ((type==null)?0:type.hashCode()) 
                        ^ ((className==null)?0:className.hashCode());

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/UpgradeStatusReport.java

@@ -82,6 +82,7 @@ public class UpgradeStatusReport {
   /**
    * Print basic upgradeStatus details.
    */
+  @Override
   public String toString() {
     return getStatusText(false);
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolManager.java

@@ -122,6 +122,7 @@ class BlockPoolManager {
     try {
       UserGroupInformation.getLoginUser().doAs(
           new PrivilegedExceptionAction<Object>() {
+            @Override
             public Object run() throws Exception {
               for (BPOfferService bpos : offerServices) {
                 bpos.start();

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -115,10 +115,12 @@ class BlockPoolSliceScanner {
       this.block = block;
     }
     
+    @Override
     public int hashCode() {
       return block.hashCode();
     }
     
+    @Override
     public boolean equals(Object other) {
       return other instanceof BlockScanInfo &&
              compareTo((BlockScanInfo)other) == 0;
@@ -128,6 +130,7 @@ class BlockPoolSliceScanner {
       return (lastScanType == ScanType.NONE) ? 0 : lastScanTime;
     }
     
+    @Override
     public int compareTo(BlockScanInfo other) {
       long t1 = lastScanTime;
       long t2 = other.lastScanTime;

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceStorage.java

@@ -440,6 +440,7 @@ public class BlockPoolSliceStorage extends Storage {
 
     // delete finalized.tmp dir in a separate thread
     new Daemon(new Runnable() {
+      @Override
       public void run() {
         try {
           deleteDir(tmpDir);
@@ -449,6 +450,7 @@ public class BlockPoolSliceStorage extends Storage {
         LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
       }
 
+      @Override
       public String toString() {
         return "Finalize " + dataDirPath;
       }

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -246,6 +246,7 @@ class BlockReceiver implements Closeable {
   /**
    * close files.
    */
+  @Override
   public void close() throws IOException {
     IOException ioe = null;
     if (syncOnClose && (out != null || checksumOut != null)) {
@@ -1033,6 +1034,7 @@ class BlockReceiver implements Closeable {
      * Thread to process incoming acks.
      * @see java.lang.Runnable#run()
      */
+    @Override
     public void run() {
       boolean lastPacketInBlock = false;
       final long startTime = ClientTraceLog.isInfoEnabled() ? System.nanoTime() : 0;

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -330,6 +330,7 @@ class BlockSender implements java.io.Closeable {
   /**
    * close opened files.
    */
+  @Override
   public void close() throws IOException {
     if (blockInFd != null && shouldDropCacheBehindRead && isLongRead()) {
       // drop the last few MB of the file from cache

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataBlockScanner.java

@@ -63,6 +63,7 @@ public class DataBlockScanner implements Runnable {
     this.conf = conf;
   }
   
+  @Override
   public void run() {
     String currentBpId = "";
     boolean firstRun = true;
@@ -273,6 +274,7 @@ public class DataBlockScanner implements Runnable {
   public static class Servlet extends HttpServlet {
     private static final long serialVersionUID = 1L;
 
+    @Override
     public void doGet(HttpServletRequest request, 
                       HttpServletResponse response) throws IOException {
       response.setContentType("text/plain");

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -928,6 +928,7 @@ public class DataNode extends Configured
     try {
       return loginUgi
           .doAs(new PrivilegedExceptionAction<InterDatanodeProtocol>() {
+            @Override
             public InterDatanodeProtocol run() throws IOException {
               return new InterDatanodeProtocolTranslatorPB(addr, loginUgi,
                   conf, NetUtils.getDefaultSocketFactory(conf), socketTimeout);
@@ -1367,6 +1368,7 @@ public class DataNode extends Configured
     /**
      * Do the deed, write the bytes
      */
+    @Override
     public void run() {
       xmitsInProgress.getAndIncrement();
       Socket sock = null;
@@ -1723,6 +1725,7 @@ public class DataNode extends Configured
     
     Daemon d = new Daemon(threadGroup, new Runnable() {
       /** Recover a list of blocks. It is run by the primary datanode. */
+      @Override
       public void run() {
         for(RecoveringBlock b : blocks) {
           try {

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -587,6 +587,7 @@ public class DataStorage extends Storage {
 
     // 2. delete finalized.tmp dir in a separate thread
     new Daemon(new Runnable() {
+        @Override
         public void run() {
           try {
             deleteDir(tmpDir);
@@ -595,6 +596,7 @@ public class DataStorage extends Storage {
           }
           LOG.info("Finalize upgrade for " + dataDirPath + " is complete.");
         }
+        @Override
         public String toString() { return "Finalize " + dataDirPath; }
       }).start();
   }
@@ -677,6 +679,7 @@ public class DataStorage extends Storage {
       throw new IOException("Cannot create directory " + to);
     
     String[] blockNames = from.list(new java.io.FilenameFilter() {
+      @Override
       public boolean accept(File dir, String name) {
         return name.startsWith(BLOCK_FILE_PREFIX);
       }
@@ -694,6 +697,7 @@ public class DataStorage extends Storage {
     
     // Now take care of the rest of the files and subdirectories
     String[] otherNames = from.list(new java.io.FilenameFilter() {
+        @Override
         public boolean accept(File dir, String name) {
           return name.startsWith(BLOCK_SUBDIR_PREFIX) 
             || name.startsWith(COPY_FILE_PREFIX);

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -145,6 +145,7 @@ class DataXceiver extends Receiver implements Runnable {
   /**
    * Read/write data from/to the DataXceiverServer.
    */
+  @Override
   public void run() {
     int opsProcessed = 0;
     Op op = null;

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java

@@ -60,6 +60,7 @@ public class DatanodeJspHelper {
                                                  InterruptedException {
     return
       user.doAs(new PrivilegedExceptionAction<DFSClient>() {
+        @Override
         public DFSClient run() throws IOException {
           return new DFSClient(NetUtils.createSocketAddr(addr), conf);
         }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java

@@ -88,6 +88,7 @@ public class DirectoryScanner implements Runnable {
       this.bpid = bpid;
     }
     
+    @Override
     public String toString() {
       return "BlockPool " + bpid
       + " Total blocks: " + totalBlocks + ", missing metadata files:"

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeManagerDatanode.java

@@ -47,6 +47,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
     this.bpid = bpid;
   }
 
+  @Override
   public HdfsServerConstants.NodeType getType() {
     return HdfsServerConstants.NodeType.DATA_NODE;
   }
@@ -71,6 +72,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
    * @return true if distributed upgrade is required or false otherwise
    * @throws IOException
    */
+  @Override
   public synchronized boolean startUpgrade() throws IOException {
     if(upgradeState) {  // upgrade is already in progress
       assert currentUpgrades != null : 
@@ -134,6 +136,7 @@ class UpgradeManagerDatanode extends UpgradeManager {
         + "The upgrade object is not defined.");
   }
 
+  @Override
   public synchronized void completeUpgrade() throws IOException {
     assert currentUpgrades != null : 
       "UpgradeManagerDatanode.currentUpgrades is null.";

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/UpgradeObjectDatanode.java

@@ -36,6 +36,7 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run
   private DataNode dataNode = null;
   private String bpid = null;
 
+  @Override
   public HdfsServerConstants.NodeType getType() {
     return HdfsServerConstants.NodeType.DATA_NODE;
   }
@@ -96,6 +97,7 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run
     throw new IOException(errorMsg);
   }
 
+  @Override
   public void run() {
     assert dataNode != null : "UpgradeObjectDatanode.dataNode is null";
     while(dataNode.shouldRun) {
@@ -132,6 +134,7 @@ public abstract class UpgradeObjectDatanode extends UpgradeObject implements Run
    * The data-node needs to re-confirm with the name-node that the upgrade
    * is complete while other nodes are still upgrading.
    */
+  @Override
   public UpgradeCommand completeUpgrade() throws IOException {
     return new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS,
                               getVersion(), (short)100);

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/fsdataset/impl/FsDatasetImpl.java

@@ -264,6 +264,7 @@ class FsDatasetImpl implements FsDatasetSpi<FsVolumeImpl> {
   /**
    * Return the number of failed volumes in the FSDataset.
    */
+  @Override
   public int getNumFailedVolumes() {
     return volumes.numberOfFailedVolumes();
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CancelDelegationTokenServlet.java

@@ -68,6 +68,7 @@ public class CancelDelegationTokenServlet extends DfsServlet {
     
     try {
       ugi.doAs(new PrivilegedExceptionAction<Void>() {
+        @Override
         public Void run() throws Exception {
           nn.getRpcServer().cancelDelegationToken(token);
           return null;

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/CheckpointSignature.java

@@ -72,6 +72,7 @@ public class CheckpointSignature extends StorageInfo
    * Get the cluster id from CheckpointSignature
    * @return the cluster id
    */
+  @Override
   public String getClusterID() {
     return clusterID;
   }
@@ -101,6 +102,7 @@ public class CheckpointSignature extends StorageInfo
     this.blockpoolID = blockpoolID;
   }
   
+  @Override
   public String toString() {
     return String.valueOf(layoutVersion) + FIELD_SEPARATOR
          + String.valueOf(namespaceID) + FIELD_SEPARATOR
@@ -133,6 +135,7 @@ public class CheckpointSignature extends StorageInfo
   //
   // Comparable interface
   //
+  @Override
   public int compareTo(CheckpointSignature o) {
     return ComparisonChain.start()
       .compare(layoutVersion, o.layoutVersion)
@@ -145,6 +148,7 @@ public class CheckpointSignature extends StorageInfo
       .result();
   }
 
+  @Override
   public boolean equals(Object o) {
     if (!(o instanceof CheckpointSignature)) {
       return false;
@@ -152,6 +156,7 @@ public class CheckpointSignature extends StorageInfo
     return compareTo((CheckpointSignature)o) == 0;
   }
 
+  @Override
   public int hashCode() {
     return layoutVersion ^ namespaceID ^
             (int)(cTime ^ mostRecentCheckpointTxId ^ curSegmentTxId)

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -118,6 +118,7 @@ class Checkpointer extends Daemon {
   //
   // The main work loop
   //
+  @Override
   public void run() {
     // Check the size of the edit log once every 5 minutes.
     long periodMSec = 5 * 60;   // 5 minutes

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java

@@ -656,6 +656,7 @@ class ClusterJspHelper {
       this.value = v;
     }
 
+    @Override
     public String toString() {
       return value;
     }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogInputStream.java

@@ -57,6 +57,7 @@ public abstract class EditLogInputStream implements Closeable {
    * Close the stream.
    * @throws IOException if an error occurred while closing
    */
+  @Override
   public abstract void close() throws IOException;
 
   /** 

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/EditLogOutputStream.java

@@ -74,6 +74,7 @@ public abstract class EditLogOutputStream implements Closeable {
    * @throws IOException if the journal can't be closed,
    *         or if there are unflushed edits
    */
+  @Override
   abstract public void close() throws IOException;
 
   /**

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -187,6 +187,7 @@ public class FSDirectory implements Closeable {
   /**
    * Shutdown the filestore
    */
+  @Override
   public void close() throws IOException {
     fsImage.close();
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -173,6 +173,7 @@ public class FSEditLog  {
 
   // stores the most current transactionId of this thread.
   private static final ThreadLocal<TransactionId> myTransactionId = new ThreadLocal<TransactionId>() {
+    @Override
     protected synchronized TransactionId initialValue() {
       return new TransactionId(Long.MAX_VALUE);
     }

+ 10 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSEditLogOp.java

@@ -183,6 +183,7 @@ public abstract class FSEditLogOp {
       return (T)this;
     }
     
+    @Override
     public String getPath() {
       return path;
     }
@@ -216,6 +217,7 @@ public abstract class FSEditLogOp {
       return (T)this;
     }
     
+    @Override
     public Block[] getBlocks() {
       return blocks;
     }
@@ -409,6 +411,7 @@ public abstract class FSEditLogOp {
       return (AddOp)cache.get(OP_ADD);
     }
 
+    @Override
     public boolean shouldCompleteLastBlock() {
       return false;
     }
@@ -431,6 +434,7 @@ public abstract class FSEditLogOp {
       return (CloseOp)cache.get(OP_CLOSE);
     }
 
+    @Override
     public boolean shouldCompleteLastBlock() {
       return true;
     }
@@ -462,6 +466,7 @@ public abstract class FSEditLogOp {
       return this;
     }
     
+    @Override
     public String getPath() {
       return path;
     }
@@ -471,6 +476,7 @@ public abstract class FSEditLogOp {
       return this;
     }
     
+    @Override
     public Block[] getBlocks() {
       return blocks;
     }
@@ -2082,6 +2088,7 @@ public abstract class FSEditLogOp {
       return (LogSegmentOp)cache.get(code);
     }
 
+    @Override
     public void readFields(DataInputStream in, int logVersion)
         throws IOException {
       // no data stored in these ops yet
@@ -2174,6 +2181,7 @@ public abstract class FSEditLogOp {
       WritableFactories.setFactory
         (BlockTwo.class,
          new WritableFactory() {
+           @Override
            public Writable newInstance() { return new BlockTwo(); }
          });
     }
@@ -2186,11 +2194,13 @@ public abstract class FSEditLogOp {
     /////////////////////////////////////
     // Writable
     /////////////////////////////////////
+    @Override
     public void write(DataOutput out) throws IOException {
       out.writeLong(blkid);
       out.writeLong(len);
     }
 
+    @Override
     public void readFields(DataInput in) throws IOException {
       this.blkid = in.readLong();
       this.len = in.readLong();

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -785,6 +785,7 @@ public class FSImage implements Closeable {
       this.sd = sd;
     }
 
+    @Override
     public void run() {
       try {
         saveFSImage(context, sd);
@@ -798,6 +799,7 @@ public class FSImage implements Closeable {
       }
     }
     
+    @Override
     public String toString() {
       return "FSImageSaver for " + sd.getRoot() +
              " of type " + sd.getStorageDirType();
@@ -1081,6 +1083,7 @@ public class FSImage implements Closeable {
     }
   }
 
+  @Override
   synchronized public void close() throws IOException {
     if (editLog != null) { // 2NN doesn't have any edit log
       getEditLog().close();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageTransactionalStorageInspector.java

@@ -114,6 +114,7 @@ class FSImageTransactionalStorageInspector extends FSImageStorageInspector {
    * 
    * @throws FileNotFoundException if not images are found.
    */
+  @Override
   FSImageFile getLatestImage() throws IOException {
     if (foundImages.isEmpty()) {
       throw new FileNotFoundException("No valid image files found");

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -233,6 +233,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
 
   private static final ThreadLocal<StringBuilder> auditBuffer =
     new ThreadLocal<StringBuilder>() {
+      @Override
       protected StringBuilder initialValue() {
         return new StringBuilder();
       }
@@ -4140,6 +4141,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       
     /**
      */
+    @Override
     public void run() {
       while (fsRunning && (safeMode != null && !safeMode.canLeave())) {
         try {
@@ -4244,6 +4246,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
    * @param deltaSafe the change in number of safe blocks
    * @param deltaTotal the change i nnumber of total blocks expected
    */
+  @Override
   public void adjustSafeModeBlockTotals(int deltaSafe, int deltaTotal) {
     // safeMode is volatile, and may be set to null at any time
     SafeModeInfo safeMode = this.safeMode;
@@ -4966,6 +4969,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       block = b;
     }
     
+    @Override
     public String toString() {
       return block.getBlockName() + "\t" + path;
     }
@@ -5460,6 +5464,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     return blockManager;
   }
   
+  @Override
   public boolean isGenStampInFuture(long genStamp) {
     return (genStamp > getGenerationStamp());
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -106,6 +106,7 @@ public class FileDataServlet extends DfsServlet {
    * GET http://<nn>:<port>/data[/<path>] HTTP/1.1
    * }
    */
+  @Override
   public void doGet(final HttpServletRequest request,
       final HttpServletResponse response)
       throws IOException {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

@@ -339,6 +339,7 @@ class FileJournalManager implements JournalManager {
 
     final static Comparator<EditLogFile> COMPARE_BY_START_TXID 
       = new Comparator<EditLogFile>() {
+      @Override
       public int compare(EditLogFile a, EditLogFile b) {
         return ComparisonChain.start()
         .compare(a.getFirstTxId(), b.getFirstTxId())

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

@@ -42,6 +42,7 @@ public class FsckServlet extends DfsServlet {
   private static final long serialVersionUID = 1L;
 
   /** Handle fsck request */
+  @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response
       ) throws IOException {
     @SuppressWarnings("unchecked")

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -79,6 +79,7 @@ public class GetImageServlet extends HttpServlet {
   private static Set<Long> currentlyDownloadingCheckpoints =
     Collections.<Long>synchronizedSet(new HashSet<Long>());
   
+  @Override
   public void doGet(final HttpServletRequest request,
                     final HttpServletResponse response
                     ) throws ServletException, IOException {

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectory.java

@@ -65,6 +65,7 @@ class INodeDirectory extends INode {
   /**
    * Check whether it's a directory
    */
+  @Override
   public boolean isDirectory() {
     return true;
   }
@@ -422,6 +423,7 @@ class INodeDirectory extends INode {
     return children;
   }
 
+  @Override
   int collectSubtreeBlocksAndClear(List<Block> v) {
     int total = 1;
     if (children == null) {

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeDirectoryWithQuota.java

@@ -71,6 +71,7 @@ class INodeDirectoryWithQuota extends INodeDirectory {
   /** Get this directory's namespace quota
    * @return this directory's namespace quota
    */
+  @Override
   long getNsQuota() {
     return nsQuota;
   }
@@ -78,6 +79,7 @@ class INodeDirectoryWithQuota extends INodeDirectory {
   /** Get this directory's diskspace quota
    * @return this directory's diskspace quota
    */
+  @Override
   long getDsQuota() {
     return dsQuota;
   }

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFile.java

@@ -59,10 +59,12 @@ public class INodeFile extends INode implements BlockCollection {
    * Since this is a file,
    * the {@link FsAction#EXECUTE} action, if any, is ignored.
    */
+  @Override
   void setPermission(FsPermission permission) {
     super.setPermission(permission.applyUMask(UMASK));
   }
 
+  @Override
   boolean isDirectory() {
     return false;
   }
@@ -138,6 +140,7 @@ public class INodeFile extends INode implements BlockCollection {
     this.blocks[idx] = blk;
   }
 
+  @Override
   int collectSubtreeBlocksAndClear(List<Block> v) {
     parent = null;
     if(blocks != null && v != null) {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeFileUnderConstruction.java

@@ -147,6 +147,7 @@ class INodeFileUnderConstruction extends INodeFile implements MutableBlockCollec
    * Convert the last block of the file to an under-construction block.
    * Set its locations.
    */
+  @Override
   public BlockInfoUnderConstruction setLastBlock(BlockInfo lastBlock,
                                           DatanodeDescriptor[] targets)
   throws IOException {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/INodeSymlink.java

@@ -40,6 +40,7 @@ public class INodeSymlink extends INode {
     setAccessTime(atime);
   }
 
+  @Override
   public boolean isLink() {
     return true;
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java

@@ -83,6 +83,7 @@ public interface JournalManager extends Closeable {
   /**
    * Close the journal manager, freeing any resources it may hold.
    */
+  @Override
   void close() throws IOException;
   
   /** 

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/LeaseManager.java

@@ -390,6 +390,7 @@ public class LeaseManager {
     final String name = getClass().getSimpleName();
 
     /** Check leases periodically. */
+    @Override
     public void run() {
       for(; shouldRunMonitor && fsnamesystem.isRunning(); ) {
         try {

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ListPathsServlet.java

@@ -54,6 +54,7 @@ public class ListPathsServlet extends DfsServlet {
 
   public static final ThreadLocal<SimpleDateFormat> df =
     new ThreadLocal<SimpleDateFormat>() {
+      @Override
       protected SimpleDateFormat initialValue() {
         return HftpFileSystem.getDateFormat();
       }
@@ -128,6 +129,7 @@ public class ListPathsServlet extends DfsServlet {
    *   </listing>
    * }
    */
+  @Override
   public void doGet(HttpServletRequest request, HttpServletResponse response)
     throws ServletException, IOException {
     final PrintWriter out = response.getWriter();

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -101,10 +101,12 @@ public class NNStorage extends Storage implements Closeable,
     EDITS,
     IMAGE_AND_EDITS;
 
+    @Override
     public StorageDirType getStorageDirType() {
       return this;
     }
 
+    @Override
     public boolean isOfType(StorageDirType type) {
       if ((this == IMAGE_AND_EDITS) && (type == IMAGE || type == EDITS))
         return true;

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeResourceChecker.java

@@ -119,6 +119,7 @@ public class NameNodeResourceChecker {
     Collection<URI> localEditDirs = Collections2.filter(
         FSNamesystem.getNamespaceEditsDirs(conf),
         new Predicate<URI>() {
+          @Override
           public boolean apply(URI input) {
             if (input.getScheme().equals(NNStorage.LOCAL_URI_SCHEME)) {
               return true;

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -369,6 +369,7 @@ class NamenodeJspHelper {
       final UserGroupInformation ugi) throws IOException, InterruptedException {
     Token<DelegationTokenIdentifier> token = ugi
         .doAs(new PrivilegedExceptionAction<Token<DelegationTokenIdentifier>>() {
+          @Override
           public Token<DelegationTokenIdentifier> run() throws IOException {
             return nn.getDelegationToken(new Text(ugi.getUserName()));
           }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/RenewDelegationTokenServlet.java

@@ -68,6 +68,7 @@ public class RenewDelegationTokenServlet extends DfsServlet {
     
     try {
       long result = ugi.doAs(new PrivilegedExceptionAction<Long>() {
+        @Override
         public Long run() throws Exception {
           return nn.getRpcServer().renewDelegationToken(token);
         }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -282,6 +282,7 @@ public class SecondaryNameNode implements Runnable {
     }
   }
 
+  @Override
   public void run() {
     SecurityUtil.doAsLoginUserOrFatal(
         new PrivilegedAction<Object>() {

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java

@@ -59,6 +59,7 @@ public class StreamFile extends DfsServlet {
     return DatanodeJspHelper.getDFSClient(request, datanode, conf, ugi);
   }
 
+  @Override
   @SuppressWarnings("unchecked")
   public void doGet(HttpServletRequest request, HttpServletResponse response)
     throws ServletException, IOException {

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeManagerNamenode.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
  * and updates its status.
  */
 class UpgradeManagerNamenode extends UpgradeManager {
+  @Override
   public HdfsServerConstants.NodeType getType() {
     return HdfsServerConstants.NodeType.NAME_NODE;
   }
@@ -55,6 +56,7 @@ class UpgradeManagerNamenode extends UpgradeManager {
    * @return true if distributed upgrade is required or false otherwise
    * @throws IOException
    */
+  @Override
   public synchronized boolean startUpgrade() throws IOException {
     if(!upgradeState) {
       initializeUpgrade();
@@ -108,6 +110,7 @@ class UpgradeManagerNamenode extends UpgradeManager {
     return reply;
   }
 
+  @Override
   public synchronized void completeUpgrade() throws IOException {
     // set and write new upgrade state into disk
     setUpgradeState(false, HdfsConstants.LAYOUT_VERSION);

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/UpgradeObjectNamenode.java

@@ -44,12 +44,14 @@ public abstract class UpgradeObjectNamenode extends UpgradeObject {
   public abstract UpgradeCommand processUpgradeCommand(UpgradeCommand command
                                                ) throws IOException;
 
+  @Override
   public HdfsServerConstants.NodeType getType() {
     return HdfsServerConstants.NodeType.NAME_NODE;
   }
 
   /**
    */
+  @Override
   public UpgradeCommand startUpgrade() throws IOException {
     // broadcast that data-nodes must start the upgrade
     return new UpgradeCommand(UpgradeCommand.UC_ACTION_START_UPGRADE,

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/BootstrapStandby.java

@@ -86,6 +86,7 @@ public class BootstrapStandby implements Tool, Configurable {
   static final int ERR_CODE_ALREADY_FORMATTED = 5;
   static final int ERR_CODE_LOGS_UNAVAILABLE = 6; 
 
+  @Override
   public int run(String[] args) throws Exception {
     parseArgs(args);
     parseConfAndFindOtherNN();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NamespaceInfo.java

@@ -77,6 +77,7 @@ public class NamespaceInfo extends StorageInfo {
     return softwareVersion;
   }
 
+  @Override
   public String toString(){
     return super.toString() + ";bpid=" + blockPoolID;
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/NodeRegistration.java

@@ -42,5 +42,6 @@ public interface NodeRegistration {
    */
   public int getVersion();
 
+  @Override
   public String toString();
 }

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/protocol/ReceivedDeletedBlockInfo.java

@@ -82,6 +82,7 @@ public class ReceivedDeletedBlockInfo {
     return status;
   }
 
+  @Override
   public boolean equals(Object o) {
     if (!(o instanceof ReceivedDeletedBlockInfo)) {
       return false;
@@ -93,6 +94,7 @@ public class ReceivedDeletedBlockInfo {
             this.delHints != null && this.delHints.equals(other.delHints));
   }
 
+  @Override
   public int hashCode() {
     assert false : "hashCode not designed";
     return 0; 
@@ -106,6 +108,7 @@ public class ReceivedDeletedBlockInfo {
     return status == BlockStatus.DELETED_BLOCK;
   }
 
+  @Override
   public String toString() {
     return block.toString() + ", status: " + status +
       ", delHint: " + delHints;

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/DFSck.java

@@ -115,6 +115,7 @@ public class DFSck extends Configured implements Tool {
   /**
    * @param args
    */
+  @Override
   public int run(final String[] args) throws IOException {
     if (args.length == 0) {
       printUsage();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsBinaryLoader.java

@@ -57,6 +57,7 @@ class OfflineEditsBinaryLoader implements OfflineEditsLoader {
   /**
    * Loads edits file, uses visitor to process all elements
    */
+  @Override
   public void loadEdits() throws IOException {
     visitor.start(inputStream.getVersion());
     while (true) {

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineEditsViewer/OfflineEditsXmlLoader.java

@@ -77,6 +77,7 @@ class OfflineEditsXmlLoader
   /**
    * Loads edits file, uses visitor to process all elements
    */
+  @Override
   public void loadEdits() throws IOException {
     try {
       XMLReader xr = XMLReaderFactory.createXMLReader();
@@ -120,6 +121,7 @@ class OfflineEditsXmlLoader
     }
   }
   
+  @Override
   public void startElement (String uri, String name,
       String qName, Attributes atts) {
     switch (state) {
@@ -168,6 +170,7 @@ class OfflineEditsXmlLoader
     }
   }
   
+  @Override
   public void endElement (String uri, String name, String qName) {
     String str = cbuf.toString().trim();
     cbuf = new StringBuffer();
@@ -248,6 +251,7 @@ class OfflineEditsXmlLoader
     }
   }
   
+  @Override
   public void characters (char ch[], int start, int length) {
     cbuf.append(ch, start, length);
   }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/IndentedImageVisitor.java

@@ -59,6 +59,7 @@ class IndentedImageVisitor extends TextWriterImageVisitor {
     write(element + " = " + value + "\n");
   }
 
+  @Override
   void visit(ImageElement element, long value) throws IOException {
     if ((element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_EXPIRY_TIME) || 
         (element == ImageElement.DELEGATION_TOKEN_IDENTIFIER_ISSUE_DATE) || 

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/CyclicIteration.java

@@ -54,6 +54,7 @@ public class CyclicIteration<K, V> implements Iterable<Map.Entry<K, V>> {
   }
 
   /** {@inheritDoc} */
+  @Override
   public Iterator<Map.Entry<K, V>> iterator() {
     return new CyclicIterator();
   }
@@ -89,11 +90,13 @@ public class CyclicIteration<K, V> implements Iterable<Map.Entry<K, V>> {
     }
 
     /** {@inheritDoc} */
+    @Override
     public boolean hasNext() {
       return hasnext;
     }
 
     /** {@inheritDoc} */
+    @Override
     public Map.Entry<K, V> next() {
       if (!hasnext) {
         throw new NoSuchElementException();
@@ -106,6 +109,7 @@ public class CyclicIteration<K, V> implements Iterable<Map.Entry<K, V>> {
     }
 
     /** Not supported */
+    @Override
     public void remove() {
       throw new UnsupportedOperationException("Not supported");
     }

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightHashSet.java

@@ -55,6 +55,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
       this.hashCode = hash;
     }
 
+    @Override
     public String toString() {
       return element.toString();
     }
@@ -142,6 +143,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
    *
   * @return true if the set is empty, false otherwise
    */
+  @Override
   public boolean isEmpty() {
     return size == 0;
   }
@@ -156,6 +158,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
   /**
    * Return the number of stored elements.
    */
+  @Override
   public int size() {
     return size;
   }
@@ -217,6 +220,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
    * @param toAdd - elements to add.
    * @return true if the set has changed, false otherwise
    */
+  @Override
   public boolean addAll(Collection<? extends T> toAdd) {
     boolean changed = false;
     for (T elem : toAdd) {
@@ -231,6 +235,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
    *
    * @return true if the element was not present in the table, false otherwise
    */
+  @Override
   public boolean add(final T element) {
     boolean added = addElem(element);
     expandIfNecessary();
@@ -270,6 +275,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
    *
   * @return If such an element exists, return true. Otherwise, return false.
    */
+  @Override
   @SuppressWarnings("unchecked")
   public boolean remove(final Object key) {
     // validate key
@@ -489,6 +495,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
     }
   }
 
+  @Override
   public Iterator<T> iterator() {
     return new LinkedSetIterator();
   }
@@ -560,6 +567,7 @@ public class LightWeightHashSet<T> implements Collection<T> {
   /**
    * Clear the set. Resize it to the original capacity.
    */
+  @Override
   @SuppressWarnings("unchecked")
   public void clear() {
     this.capacity = this.initialCapacity;

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/LightWeightLinkedSet.java

@@ -47,6 +47,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
       this.after = null;
     }
 
+    @Override
     public String toString() {
       return super.toString();
     }
@@ -79,6 +80,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
    *
    * @return true if the element was not present in the table, false otherwise
    */
+  @Override
   protected boolean addElem(final T element) {
     // validate element
     if (element == null) {
@@ -118,6 +120,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
    *
   * @return Return the entry with the element if it exists. Otherwise return null.
    */
+  @Override
   protected DoubleLinkedElement<T> removeElem(final T key) {
     DoubleLinkedElement<T> found = (DoubleLinkedElement<T>) (super
         .removeElem(key));
@@ -162,6 +165,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
    *
    * @return first element
    */
+  @Override
   public List<T> pollN(int n) {
     if (n >= size) {
       // if we need to remove all elements then do fast polling
@@ -182,6 +186,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
    * link list, don't worry about hashtable - faster version of the parent
    * method.
    */
+  @Override
   public List<T> pollAll() {
     List<T> retList = new ArrayList<T>(size);
     while (head != null) {
@@ -212,6 +217,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
     return a;
   }
 
+  @Override
   public Iterator<T> iterator() {
     return new LinkedSetIterator();
   }
@@ -251,6 +257,7 @@ public class LightWeightLinkedSet<T> extends LightWeightHashSet<T> {
   /**
    * Clear the set. Resize it to the original capacity.
    */
+  @Override
   public void clear() {
     super.clear();
     this.head = null;

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/util/XMLUtils.java

@@ -146,6 +146,7 @@ public class XMLUtils {
     /** 
      * Convert a stanza to a human-readable string.
      */
+    @Override
     public String toString() {
       StringBuilder bld = new StringBuilder();
       bld.append("{");

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/resources/PostOpParam.java

@@ -49,6 +49,7 @@ public class PostOpParam extends HttpOpParam<PostOpParam.Op> {
     }
 
     /** @return a URI query string. */
+    @Override
     public String toQueryString() {
       return NAME + "=" + this;
     }

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java

@@ -55,6 +55,7 @@ public class TestFcHdfsCreateMkdir extends
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsPermission.java

@@ -55,6 +55,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();
@@ -72,6 +73,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
    */
   static final FsPermission FILE_MASK_IGNORE_X_BIT = 
     new FsPermission((short) ~0666);
+  @Override
   FsPermission getFileMask() {
     return FILE_MASK_IGNORE_X_BIT;
   }

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestFcHdfsSymlink.java

@@ -53,18 +53,22 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
   private static WebHdfsFileSystem webhdfs;
 
   
+  @Override
   protected String getScheme() {
     return "hdfs";
   }
 
+  @Override
   protected String testBaseDir1() throws IOException {
     return "/test1";
   }
   
+  @Override
   protected String testBaseDir2() throws IOException {
     return "/test2";
   }
 
+  @Override
   protected URI testURI() {
     return cluster.getURI(0);
   }

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java

@@ -35,6 +35,7 @@ public class TestGlobPaths extends TestCase {
       this.regex = regex;
     }
 
+    @Override
     public boolean accept(Path path) {
       return path.toString().matches(regex);
     }
@@ -47,6 +48,7 @@ public class TestGlobPaths extends TestCase {
   static final String USER_DIR = "/user/"+System.getProperty("user.name");
   private Path[] path = new Path[NUM_OF_PATHS];
   
+  @Override
   protected void setUp() throws Exception {
     try {
       Configuration conf = new HdfsConfiguration();
@@ -57,6 +59,7 @@ public class TestGlobPaths extends TestCase {
     }
   }
   
+  @Override
   protected void tearDown() throws Exception {
     if(dfsCluster!=null) {
       dfsCluster.shutdown();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -75,6 +75,7 @@ public class TestHDFSFileContextMainOperations extends
     cluster.shutdown();   
   }
   
+  @Override
   @Before
   public void setUp() throws Exception {
     super.setUp();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemAtHdfsRoot.java

@@ -62,6 +62,7 @@ public class TestViewFileSystemAtHdfsRoot extends ViewFileSystemBaseTest {
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     fsTarget = fHdfs;

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFileSystemHdfs.java

@@ -80,6 +80,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs
@@ -89,6 +90,7 @@ public class TestViewFileSystemHdfs extends ViewFileSystemBaseTest {
     super.setUp();
   }
 
+  @Override
   @After
   public void tearDown() throws Exception {
     super.tearDown();

+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/viewfs/TestViewFsAtHdfsRoot.java

@@ -61,6 +61,7 @@ public class TestViewFsAtHdfsRoot extends ViewFsBaseTest {
     cluster.shutdown();   
   }
 
+  @Override
   @Before
   public void setUp() throws Exception {
     // create the test root on local_fs

Some files were not shown because too many files changed in this diff.