
HADOOP-18284. Remove Unnecessary semicolon ';' (#4422). Contributed by fanshilun.

slfan1989, 2 years ago
Commit: 073b8ea1d5
25 files changed, with 37 insertions and 39 deletions
  1. + 1 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java
  2. + 1 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java
  3. + 1 - 1  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java
  4. + 8 - 8  hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java
  5. + 2 - 2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
  6. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java
  7. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
  8. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java
  9. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java
 10. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
 11. + 1 - 1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java
 12. + 4 - 4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java
 13. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java
 14. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java
 15. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java
 16. + 2 - 4  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
 17. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java
 18. + 1 - 1  hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java
 19. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
 20. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationAllocationStatePBImpl.java
 21. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ConfiguredYarnAuthorizer.java
 22. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java
 23. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java
 24. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java
 25. + 1 - 1  hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java
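
Every hunk below deletes a doubled semicolon (a few also tidy adjacent whitespace). In Java, the stray second ';' parses as a separate empty statement: legal and harmless at runtime, but dead syntax that static checkers (e.g., Checkstyle's EmptyStatement check) report. A minimal sketch of the pattern, using a hypothetical class that is not part of the patch:

    public class SemicolonExample {
      int read() {
        int count = 0;;  // the second ';' is a separate, empty statement
        return count;    // deleting it changes nothing at runtime
      }
    }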

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -164,7 +164,7 @@ public class TestLocalFileSystem {
   public void testSyncable() throws IOException {
     FileSystem fs = fileSys.getRawFileSystem();
     Path file = new Path(TEST_ROOT_DIR, "syncable");
-    FSDataOutputStream out = fs.create(file);;
+    FSDataOutputStream out = fs.create(file);
     final int bytesWritten = 1;
     byte[] expectedBuf = new byte[] {'0', '1', '2', '3'};
     try {

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/io/file/tfile/TestTFileSeqFileComparison.java

@@ -516,7 +516,7 @@ public class TestTFileSeqFileComparison {
   }
 
   private static class MyOptions {
-    String rootDir = GenericTestUtils.getTestDir().getAbsolutePath();;
+    String rootDir = GenericTestUtils.getTestDir().getAbsolutePath();
     String compress = "gz";
     String format = "tfile";
     int dictSize = 1000;

+ 1 - 1
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/ipc/RPCCallBenchmark.java

@@ -289,7 +289,7 @@ public class RPCCallBenchmark extends TestRpcBase implements Tool {
           long cpuNanosClient = getTotalCpuTime(ctx.getTestThreads());
           long cpuNanosServer = -1;
           if (server != null) {
-            cpuNanosServer = getTotalCpuTime(server.getHandlers());; 
+            cpuNanosServer = getTotalCpuTime(server.getHandlers());
           }
           System.out.println("====== Results ======");
           System.out.println("Options:\n" + opts);

+ 8 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/security/TestWhitelistBasedResolver.java

@@ -111,16 +111,16 @@ public class TestWhitelistBasedResolver {
     assertEquals (wqr.getDefaultProperties(),
         wqr.getServerProperties(InetAddress.getByName("10.119.103.112")));
 
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.119.103.113"));
 
-    assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
+    assertEquals(wqr.getDefaultProperties(), wqr.getServerProperties("10.221.103.121"));
 
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
-    assertEquals (SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
-    assertEquals (wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));;
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.221.104.0"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.222.103.121"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.223.104.0"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.221"));
+    assertEquals(SASL_PRIVACY_PROPS, wqr.getServerProperties("10.113.221.222"));
+    assertEquals(wqr.getDefaultProperties(), wqr.getServerProperties("127.0.0.1"));
 
     TestFileBasedIPList.removeFile("fixedwhitelist.txt");
     TestFileBasedIPList.removeFile("variablewhitelist.txt");
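
In this file the patch pairs the final doubled-semicolon removal with a whitespace cleanup, deleting the stray space between assertEquals and its argument list in each assertion.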

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java

@@ -745,7 +745,7 @@ public class IPCLoggerChannel implements AsyncLogger {
       URI uri = URI.create(ret.getFromURL());
       httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
     } else {
-      httpServerURL = getHttpServerURI("http", ret.getHttpPort());;
+      httpServerURL = getHttpServerURI("http", ret.getHttpPort());
     }
   }
 
@@ -754,7 +754,7 @@ public class IPCLoggerChannel implements AsyncLogger {
       URI uri = URI.create(ret.getFromURL());
       httpServerURL = getHttpServerURI(uri.getScheme(), uri.getPort());
     } else {
-      httpServerURL = getHttpServerURI("http", ret.getHttpPort());;
+      httpServerURL = getHttpServerURI("http", ret.getHttpPort());
     }
   }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -803,7 +803,7 @@ public class SecondaryNameNode implements Runnable,
       geteditsizeOpt = new Option("geteditsize",
         "return the number of uncheckpointed transactions on the NameNode");
       checkpointOpt = OptionBuilder.withArgName("force")
-        .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");;
+        .hasOptionalArg().withDescription("checkpoint on startup").create("checkpoint");
       formatOpt = new Option("format", "format the local storage during startup");
       helpOpt = new Option("h", "help", false, "get help information");
       

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -99,7 +99,7 @@ public class TestAddBlockRetry {
                                             HdfsConstants.GRANDFATHER_INODE_ID,
                                             "clientName", null, onRetryBlock);
     } finally {
-      ns.readUnlock();;
+      ns.readUnlock();
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
         ns.getBlockManager(), src, null, null, null, r);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckPointForSecurityTokens.java

@@ -91,7 +91,7 @@ public class TestCheckPointForSecurityTokens {
         log.scanLog(Long.MAX_VALUE, true);
         long numTransactions = (log.getLastTxId() - log.getFirstTxId()) + 1;
         assertEquals("In-progress log " + log + " should have 5 transactions",
-                     5, numTransactions);;
+                     5, numTransactions);
       }
 
       // Saving image in safe mode should succeed

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestCheckpoint.java

@@ -2613,7 +2613,7 @@ public class TestCheckpoint {
   }
 
   private static CheckpointStorage spyOnSecondaryImage(SecondaryNameNode secondary1) {
-    CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());;
+    CheckpointStorage spy = Mockito.spy((CheckpointStorage)secondary1.getFSImage());
     secondary1.setFSImage(spy);
     return spy;
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java

@@ -150,7 +150,7 @@ public class TestFavoredNodesEndToEnd {
     d.stopDecommission();
 
     BlockLocation[] locations = getBlockLocations(p);
-    Assert.assertEquals(replication, locations[0].getNames().length);;
+    Assert.assertEquals(replication, locations[0].getNames().length);
     //also make sure that the datanode[0] is not in the list of hosts
     for (int i = 0; i < replication; i++) {
       final String loc = locations[0].getNames()[i];

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestDelegationTokensWithHA.java

@@ -317,7 +317,7 @@ public class TestDelegationTokensWithHA {
     longUgi.doAs(new PrivilegedExceptionAction<Void>() {
       @Override
       public Void run() throws Exception {
-        token.cancel(conf);;
+        token.cancel(conf);
         return null;
       }
     });

+ 4 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/MapTask.java

@@ -150,8 +150,8 @@ public class MapTask extends Task {
    * @param <V>
    */
   class TrackedRecordReader<K, V> 
-      implements RecordReader<K,V> {
-    private RecordReader<K,V> rawIn;
+      implements RecordReader<K, V> {
+    private RecordReader<K, V> rawIn;
     private Counters.Counter fileInputByteCounter;
     private Counters.Counter inputRecordCounter;
     private TaskReporter reporter;
@@ -240,7 +240,7 @@ public class MapTask extends Task {
    * This class skips the records based on the failed ranges from previous 
    * attempts.
    */
-  class SkippingRecordReader<K, V> extends TrackedRecordReader<K,V> {
+  class SkippingRecordReader<K, V> extends TrackedRecordReader<K, V> {
     private SkipRangeIterator skipIt;
     private SequenceFile.Writer skipWriter;
     private boolean toWriteSkipRecs;
@@ -930,7 +930,7 @@ public class MapTask extends Task {
     // spill accounting
     private int maxRec;
     private int softLimit;
-    boolean spillInProgress;;
+    boolean spillInProgress;
     int bufferRemaining;
     volatile Throwable sortSpillException = null;
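
The two leading hunks in this file are whitespace-only as well, normalizing the generic parameters from <K,V> to <K, V>; the semicolon fix is the spillInProgress line in the third hunk.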
 

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/aggregate/LongValueMax.java

@@ -96,7 +96,7 @@ public class LongValueMax implements ValueAggregator<String> {
    *         expected to be used by the a combiner.
    */
   public ArrayList<String> getCombinerOutput() {
-    ArrayList<String> retv = new ArrayList<String>(1);;
+    ArrayList<String> retv = new ArrayList<String>(1);
     retv.add("" + maxVal);
     return retv;
   }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/webapp/HsTasksBlock.java

@@ -123,7 +123,7 @@ public class HsTasksBlock extends HtmlBlock {
       long sortFinishTime = -1;
       long attemptFinishTime = -1;
       long elapsedShuffleTime = -1;
-      long elapsedSortTime = -1;;
+      long elapsedSortTime = -1;
       long elapsedReduceTime = -1;
       long attemptElapsed = -1;
       TaskAttempt successful = info.getSuccessful();

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/BigMapOutput.java

@@ -70,7 +70,7 @@ public class BigMapOutput extends Configured implements Tool {
                                 BytesWritable.class, BytesWritable.class,
                                 CompressionType.NONE);
     long numBytesToWrite = fileSizeInMB * 1024 * 1024;
-    int minKeySize = conf.getInt(MIN_KEY, 10);;
+    int minKeySize = conf.getInt(MIN_KEY, 10);
     int keySizeRange = 
       conf.getInt(MAX_KEY, 1000) - minKeySize;
     int minValueSize = conf.getInt(MIN_VALUE, 0);

+ 2 - 4
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java

@@ -179,8 +179,7 @@ public abstract class NotificationTestCase extends HadoopTestCase {
 
     // Hack for local FS that does not have the concept of a 'mounting point'
     if (isLocalFS()) {
-      String localPathRoot = System.getProperty("test.build.data","/tmp")
-        .toString().replace(' ', '+');;
+      String localPathRoot = System.getProperty("test.build.data", "/tmp").replace(' ', '+');
       inDir = new Path(localPathRoot, inDir);
       outDir = new Path(localPathRoot, outDir);
     }
@@ -217,8 +216,7 @@ public abstract class NotificationTestCase extends HadoopTestCase {
 
     // Hack for local FS that does not have the concept of a 'mounting point'
     if (isLocalFS()) {
-      String localPathRoot = System.getProperty("test.build.data","/tmp")
-        .toString().replace(' ', '+');;
+      String localPathRoot = System.getProperty("test.build.data", "/tmp").replace(' ', '+');
       inDir = new Path(localPathRoot, inDir);
       outDir = new Path(localPathRoot, outDir);
     }
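
These two hunks go slightly further than the rest: System.getProperty(String, String) already returns a String, so the redundant .toString() call is dropped along with the doubled semicolon, and each statement is joined onto a single line (hence the + 2 - 4 count).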

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestValueIterReset.java

@@ -437,7 +437,7 @@ public class TestValueIterReset {
     int count = 0;
 
     while (values.hasNext()) {
-      i = values.next();;
+      i = values.next();
       LOG.info(key + ":" + i);
       
       if (count == 5) {

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-examples/src/main/java/org/apache/hadoop/examples/pi/math/LongLong.java

@@ -84,7 +84,7 @@ class LongLong {
     final long v = x1*y1;
 
     final long tmp = (t - u)>>>1;
-    result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK);;
+    result.d0 = ((t + u)>>>1) - v + ((tmp << MID) & FULL_MASK);
     result.d1 = v + (tmp >> MID);
     return result;
     */

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java

@@ -1928,7 +1928,7 @@ public class TestYarnCLI {
     QueueCLI cli = createAndGetQueueCLI();
     when(client.getQueueInfo(any(String.class))).thenReturn(null);
     int result = cli.run(new String[] { "-status", queueName });
-    assertEquals(-1, result);;
+    assertEquals(-1, result);
     ByteArrayOutputStream baos = new ByteArrayOutputStream();
     PrintWriter pw = new PrintWriter(baos);
     pw.println("Cannot get queue from RM by queueName = " + queueName

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/api/records/impl/pb/ReservationAllocationStatePBImpl.java

@@ -62,7 +62,7 @@ import java.util.List;
 public class ReservationAllocationStatePBImpl extends
         ReservationAllocationState {
   private ReservationAllocationStateProto proto =
-          ReservationAllocationStateProto.getDefaultInstance();;
+          ReservationAllocationStateProto.getDefaultInstance();
   private ReservationAllocationStateProto.Builder builder = null;
   private boolean viaProto = false;
 

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/ConfiguredYarnAuthorizer.java

@@ -43,7 +43,7 @@ public class ConfiguredYarnAuthorizer extends YarnAuthorizationProvider {
   private final ConcurrentMap<PrivilegedEntity, Map<AccessType, AccessControlList>>
       allAcls = new ConcurrentHashMap<>();
   private volatile AccessControlList adminAcl = null;
-  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();;
+  private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();
   private final ReentrantReadWriteLock.ReadLock readLock = lock.readLock();
   private final ReentrantReadWriteLock.WriteLock writeLock =  lock.writeLock();
 

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/WindowsSecureContainerExecutor.java

@@ -449,7 +449,7 @@ public class WindowsSecureContainerExecutor extends DefaultContainerExecutor {
       COMPLETE
     };
     
-    private State state;;
+    private State state;
     
     private final String cwd;
     private final String jobName;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/ProportionalCapacityPreemptionPolicy.java

@@ -496,7 +496,7 @@ public class ProportionalCapacityPreemptionPolicy
     Map<ApplicationAttemptId, Set<RMContainer>> toPreempt =
         new HashMap<>();
     Map<PreemptionCandidatesSelector, Map<ApplicationAttemptId,
-        Set<RMContainer>>> toPreemptPerSelector =  new HashMap<>();;
+        Set<RMContainer>>> toPreemptPerSelector =  new HashMap<>();
     for (PreemptionCandidatesSelector selector :
         candidatesSelectionPolicies) {
       long startTime = 0;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestAppManager.java

@@ -1772,7 +1772,7 @@ public class TestAppManager extends AppManagerTestBase{
       RecordFactory recordFactory) {
     ContainerLaunchContext amContainer = recordFactory.newRecordInstance(
         ContainerLaunchContext.class);
-    amContainer.setApplicationACLs(new HashMap<ApplicationAccessType, String>());;
+    amContainer.setApplicationACLs(new HashMap<ApplicationAccessType, String>());
     return amContainer;
   }
 

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestRMAdminService.java

@@ -109,7 +109,7 @@ import static org.junit.Assert.assertTrue;
 
 public class TestRMAdminService {
 
-  private Configuration configuration;;
+  private Configuration configuration;
   private MockRM rm = null;
   private FileSystem fs;
   private Path workingPath;