HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts. Contributed by Hari Mankude

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1203950 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 13 years ago
parent commit b7cd8c0f86

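The commit removes throws clauses for checked exceptions that are never actually raised, along with casts the compiler already performs implicitly. As a quick illustration of the first category, here is a minimal sketch (the class and method names are hypothetical, not code from this patch) of how a spurious throws clause forces callers to carry dead handling code, mirroring the getCapacity() and refreshNamenodes() changes below:

import java.io.IOException;

// Hypothetical sketch only; names are illustrative, not from the patch.
class SpuriousThrowsSketch {

  // Before cleanup: declares IOException although the body can never throw it.
  static long capacityBefore(long total, long reserved) throws IOException {
    long remaining = total - reserved;
    return remaining > 0 ? remaining : 0;
  }

  // After cleanup: same body, no throws clause, so callers no longer need to
  // catch the exception and re-wrap it (as DataNode.refreshNamenodes() did).
  static long capacityAfter(long total, long reserved) {
    long remaining = total - reserved;
    return remaining > 0 ? remaining : 0;
  }
}
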
+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -52,6 +52,8 @@ Trunk (unreleased changes)
 
     HDFS-2334. Add Closeable to JournalManager. (Ivan Kelly via jitendra)
 
+    HDFS-2564. Cleanup unnecessary exceptions thrown and unnecessary casts.
+    (Hari Mankude via eli)
 
   OPTIMIZATIONS
     HDFS-2477. Optimize computing the diff between a block report and the

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -543,7 +543,7 @@ public class DFSInputStream extends FSInputStream {
           if (pos > blockEnd) {
             currentNode = blockSeekTo(pos);
           }
-          int realLen = (int) Math.min((long) len, (blockEnd - pos + 1L));
+          int realLen = (int) Math.min(len, (blockEnd - pos + 1L));
           int result = readBuffer(buf, off, realLen, corruptedBlockMap);
           
           if (result >= 0) {

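The cast dropped above is safe because of Java's binary numeric promotion: when Math.min receives an int and a long, the int operand is widened to long and the call resolves to Math.min(long, long). A minimal sketch, with made-up values for illustration only:

public class PromotionSketch {
  public static void main(String[] args) {
    int len = 4096;            // hypothetical read length
    long blockEnd = 10_000L;   // hypothetical block end offset
    long pos = 8_000L;

    // The int argument is widened to long automatically, so the explicit
    // (long) cast removed in DFSInputStream is redundant.
    int withCast = (int) Math.min((long) len, blockEnd - pos + 1L);
    int withoutCast = (int) Math.min(len, blockEnd - pos + 1L);

    System.out.println(withCast == withoutCast);   // prints true
  }
}
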
+ 6 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -239,8 +239,7 @@ public class DataNode extends Configured
    * Use {@link NetUtils#createSocketAddr(String)} instead.
    */
   @Deprecated
-  public static InetSocketAddress createSocketAddr(String target
-                                                   ) throws IOException {
+  public static InetSocketAddress createSocketAddr(String target) {
     return NetUtils.createSocketAddr(target);
   }
   
@@ -334,14 +333,14 @@ public class DataNode extends Configured
       }
     }
     
-    void joinAll() throws InterruptedException {
+    void joinAll() {
       for (BPOfferService bpos: this.getAllNamenodeThreads()) {
         bpos.join();
       }
     }
     
     void refreshNamenodes(Configuration conf)
-        throws IOException, InterruptedException {
+        throws IOException {
       LOG.info("Refresh request received for nameservices: "
           + conf.get(DFS_FEDERATION_NAMESERVICES));
       List<InetSocketAddress> newAddresses = 
@@ -859,8 +858,7 @@ public class DataNode extends Configured
 
     private void connectToNNAndHandshake() throws IOException {
       // get NN proxy
-      bpNamenode = 
-        (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
+      bpNamenode = (DatanodeProtocol)RPC.waitForProxy(DatanodeProtocol.class,
             DatanodeProtocol.versionID, nnAddr, dn.getConf());
 
       // First phase of the handshake with NN - get the namespace
@@ -2120,7 +2118,7 @@ public class DataNode extends Configured
      * entire target list, the block, and the data.
      */
     DataTransfer(DatanodeInfo targets[], ExtendedBlock b, BlockConstructionStage stage,
-        final String clientname) throws IOException {
+        final String clientname) {
       if (DataTransferProtocol.LOG.isDebugEnabled()) {
         DataTransferProtocol.LOG.debug(getClass().getSimpleName() + ": "
             + b + " (numBytes=" + b.getNumBytes() + ")"
@@ -2896,13 +2894,7 @@ public class DataNode extends Configured
   }
   
   public void refreshNamenodes(Configuration conf) throws IOException {
-    try {
-      blockPoolManager.refreshNamenodes(conf);
-    } catch (InterruptedException ex) {
-      IOException eio = new IOException();
-      eio.initCause(ex);
-      throw eio;
-    }
+    blockPoolManager.refreshNamenodes(conf);
   }
 
   @Override //ClientDatanodeProtocol

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -459,7 +459,7 @@ public class FSDataset implements FSDatasetInterface {
         long metaFileLen = metaFile.length();
         int crcHeaderLen = DataChecksum.getChecksumHeaderSize();
         if (!blockFile.exists() || blockFileLen == 0 ||
-            !metaFile.exists() || metaFileLen < (long)crcHeaderLen) {
+            !metaFile.exists() || metaFileLen < crcHeaderLen) {
           return 0;
         }
         checksumIn = new DataInputStream(
@@ -578,7 +578,7 @@ public class FSDataset implements FSDatasetInterface {
      * reserved capacity.
      * @return the unreserved number of bytes left in this filesystem. May be zero.
      */
-    long getCapacity() throws IOException {
+    long getCapacity() {
       long remaining = usage.getCapacity() - reserved;
       return remaining > 0 ? remaining : 0;
     }
@@ -818,7 +818,7 @@ public class FSDataset implements FSDatasetInterface {
       return dfsUsed;
     }
 
-    private long getCapacity() throws IOException {
+    private long getCapacity() {
       long capacity = 0L;
       for (FSVolume vol : volumes) {
         capacity += vol.getCapacity();
@@ -1667,7 +1667,7 @@ public class FSDataset implements FSDatasetInterface {
     }
     if (!oldmeta.renameTo(newmeta)) {
       replicaInfo.setGenerationStamp(oldGS); // restore old GS
-      throw new IOException("Block " + (Block)replicaInfo + " reopen failed. " +
+      throw new IOException("Block " + replicaInfo + " reopen failed. " +
                             " Unable to move meta file  " + oldmeta +
                             " to " + newmeta);
     }
@@ -2018,7 +2018,7 @@ public class FSDataset implements FSDatasetInterface {
   /**
    * Find the file corresponding to the block and return it if it exists.
    */
-  File validateBlockFile(String bpid, Block b) throws IOException {
+  File validateBlockFile(String bpid, Block b) {
     //Should we check for metadata file too?
     File f = getFile(bpid, b);
     
@@ -2327,7 +2327,7 @@ public class FSDataset implements FSDatasetInterface {
         if (datanode.blockScanner != null) {
           datanode.blockScanner.addBlock(new ExtendedBlock(bpid, diskBlockInfo));
         }
-        DataNode.LOG.warn("Added missing block to memory " + (Block)diskBlockInfo);
+        DataNode.LOG.warn("Added missing block to memory " + diskBlockInfo);
         return;
       }
       /*
@@ -2600,7 +2600,7 @@ public class FSDataset implements FSDatasetInterface {
    * get list of all bpids
    * @return list of bpids
    */
-  public String [] getBPIdlist() throws IOException {
+  public String [] getBPIdlist() {
     return volumeMap.getBlockPoolList();
   }
   

+ 8 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1421,7 +1421,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     try {
       lb = startFileInternal(src, null, holder, clientMachine, 
                         EnumSet.of(CreateFlag.APPEND), 
-                        false, blockManager.maxReplication, (long)0);
+                        false, blockManager.maxReplication, 0);
     } finally {
       writeUnlock();
     }
@@ -1504,7 +1504,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       fileLength = pendingFile.computeContentSummary().getLength();
       blockSize = pendingFile.getPreferredBlockSize();
       clientNode = pendingFile.getClientNode();
-      replication = (int)pendingFile.getReplication();
+      replication = pendingFile.getReplication();
     } finally {
       writeUnlock();
     }
@@ -2214,6 +2214,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     // If the penultimate block is not COMPLETE, then it must be COMMITTED.
     if(nrCompleteBlocks < nrBlocks - 2 ||
        nrCompleteBlocks == nrBlocks - 2 &&
+         curBlock != null &&
          curBlock.getBlockUCState() != BlockUCState.COMMITTED) {
       final String message = "DIR* NameSystem.internalReleaseLease: "
         + "attempt to release a create lock on "
@@ -2299,7 +2300,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
   }
   
   Lease reassignLeaseInternal(Lease lease, String src, String newHolder,
-      INodeFileUnderConstruction pendingFile) throws IOException {
+      INodeFileUnderConstruction pendingFile) {
     assert hasWriteLock();
     pendingFile.setClientName(newHolder);
     return leaseManager.reassignLease(lease, src, newHolder);
@@ -2402,7 +2403,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
                 newtargets[i]);
           }
         }
-        if (closeFile) {
+        if ((closeFile) && (descriptors != null)) {
           // the file is getting closed. Insert block locations into blockManager.
           // Otherwise fsck will report these blocks as MISSING, especially if the
           // blocksReceived from Datanodes take a long time to arrive.
@@ -3088,7 +3089,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
       this.blockTotal = total;
       this.blockThreshold = (int) (blockTotal * threshold);
       this.blockReplQueueThreshold = 
-        (int) (((double) blockTotal) * replQueueThreshold);
+        (int) (blockTotal * replQueueThreshold);
       checkMode();
     }
       
@@ -3098,7 +3099,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
      * @param replication current replication 
      */
     private synchronized void incrementSafeBlockCount(short replication) {
-      if ((int)replication == safeReplication)
+      if (replication == safeReplication)
         this.blockSafe++;
       checkMode();
     }
@@ -3230,6 +3231,7 @@ public class FSNamesystem implements Namesystem, FSClusterStats,
     /**
      * Checks consistency of the class state.
      * This is costly and currently called only in assert.
+     * @throws IOException 
      */
     private boolean isConsistent() throws IOException {
       if (blockTotal == -1 && blockSafe == -1) {

+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -258,11 +258,12 @@ public class NameNode {
    * If the service rpc is not configured returns null
    */
   protected InetSocketAddress getServiceRpcServerAddress(Configuration conf)
-    throws IOException {
+      throws IOException {
     return NameNode.getServiceAddress(conf, false);
   }
 
-  protected InetSocketAddress getRpcServerAddress(Configuration conf) throws IOException {
+  protected InetSocketAddress getRpcServerAddress(Configuration conf)
+      throws IOException {
     return getAddress(conf);
   }