
HADOOP-15254. Correct the wrong word spelling 'intialize'. Contributed by fang zhenyi.

Arpit Agarwal, 7 years ago
commit 9f3be797be

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/log/Log4Json.java

@@ -103,7 +103,7 @@ public class Log4Json extends Layout {
   /**
    * Jackson factories are thread safe when constructing parsers and generators.
    * They are not thread safe in configure methods; if there is to be any
-   * configuration it must be done in a static intializer block.
+   * configuration it must be done in a static initializer block.
    */
   private static final JsonFactory factory = new MappingJsonFactory();
   private static final ObjectReader READER = new ObjectMapper(factory).reader();
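The comment touched above is also the design rule for this class: Jackson's configure methods are not thread safe, so any factory configuration has to happen once, in a static initializer, before the shared factory is handed to parser threads. A minimal sketch of that pattern, assuming the Jackson 2 (com.fasterxml) classes; the class name and the feature being toggled are illustrative only, not part of Log4Json or of this patch:

    import com.fasterxml.jackson.core.JsonFactory;
    import com.fasterxml.jackson.core.JsonGenerator;
    import com.fasterxml.jackson.databind.MappingJsonFactory;
    import com.fasterxml.jackson.databind.ObjectMapper;
    import com.fasterxml.jackson.databind.ObjectReader;

    public class JsonLayoutSupport {
      // Shared factory; thread safe for creating parsers/generators once configured.
      private static final JsonFactory FACTORY = new MappingJsonFactory();

      static {
        // Configuration runs exactly once, before any thread can see FACTORY.
        // The feature chosen here is only an example setting.
        FACTORY.disable(JsonGenerator.Feature.AUTO_CLOSE_TARGET);
      }

      private static final ObjectReader READER = new ObjectMapper(FACTORY).reader();
    }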

+ 1 - 1
hadoop-common-project/hadoop-kms/src/main/java/org/apache/hadoop/crypto/key/kms/server/KMSWebApp.java

@@ -152,7 +152,7 @@ public class KMSWebApp implements ServletContextListener {
 
       kmsAudit = new KMSAudit(kmsConf);
 
-      // intializing the KeyProvider
+      // initializing the KeyProvider
       String providerString = kmsConf.get(KMSConfiguration.KEY_PROVIDER_URI);
       if (providerString == null) {
         throw new IllegalStateException("No KeyProvider has been defined");

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/federation/store/driver/StateStoreDriver.java

@@ -73,7 +73,7 @@ public abstract class StateStoreDriver implements StateStoreRecordOperations {
 
     boolean success = initDriver();
     if (!success) {
-      LOG.error("Cannot intialize driver for {}", getDriverName());
+      LOG.error("Cannot initialize driver for {}", getDriverName());
       return false;
     }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -240,7 +240,7 @@ public class NamenodeWebHdfsMethods {
       final String remoteAddr, final HdfsFileStatus status) throws IOException {
     FSNamesystem fsn = namenode.getNamesystem();
     if (fsn == null) {
-      throw new IOException("Namesystem has not been intialized yet.");
+      throw new IOException("Namesystem has not been initialized yet.");
     }
     final BlockManager bm = fsn.getBlockManager();
     

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/web/resources/TestWebHdfsDataLocality.java

@@ -243,7 +243,7 @@ public class TestWebHdfsDataLocality {
     NameNode nn = mock(NameNode.class);
     when(nn.getNamesystem()).thenReturn(null);
     exception.expect(IOException.class);
-    exception.expectMessage("Namesystem has not been intialized yet.");
+    exception.expectMessage("Namesystem has not been initialized yet.");
     NamenodeWebHdfsMethods.chooseDatanode(nn, "/path", PutOpParam.Op.CREATE, 0,
         DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT, null, LOCALHOST, null);
   }

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/MRAppMaster.java

@@ -1255,7 +1255,7 @@ public class MRAppMaster extends CompositeService {
 
     boolean initFailed = false;
     if (!errorHappenedShutDown) {
-      // create a job event for job intialization
+      // create a job event for job initialization
       JobEvent initJobEvent = new JobEvent(job.getID(), JobEventType.JOB_INIT);
       // Send init to the job (this does NOT trigger job execution)
       // This is a synchronous call, not an event through dispatcher. We want
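The comment in this hunk is making a delivery-semantics point: JOB_INIT is handed to the job's handler directly, so initialization is complete when the call returns, whereas routing the event through the dispatcher would only enqueue it for a later thread. A self-contained sketch of that difference, using generic stand-in types rather than the real MRAppMaster, JobImpl, or dispatcher classes:

    import java.util.concurrent.BlockingQueue;
    import java.util.concurrent.LinkedBlockingQueue;

    public class EventDeliverySketch {

      interface EventHandler<T> { void handle(T event); }

      public static void main(String[] args) throws Exception {
        EventHandler<String> job = event -> System.out.println("handling " + event);

        // Synchronous delivery: the handler runs on this thread, so the work
        // (here, job init) is guaranteed to be finished when the call returns.
        job.handle("JOB_INIT for job_1");

        // Dispatcher-style delivery: the event is only enqueued; a separate
        // thread picks it up later, so nothing has been initialized on return.
        BlockingQueue<String> queue = new LinkedBlockingQueue<>();
        Thread dispatcher = new Thread(() -> {
          try {
            job.handle(queue.take());
          } catch (InterruptedException ignored) {
          }
        });
        dispatcher.start();
        queue.put("JOB_INIT for job_2");
        dispatcher.join();
      }
    }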

+ 1 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/JobHistory.java

@@ -98,7 +98,7 @@ public class JobHistory extends AbstractService implements HistoryContext {
     try {
       hsManager.initExisting();
     } catch (IOException e) {
-      throw new YarnRuntimeException("Failed to intialize existing directories", e);
+      throw new YarnRuntimeException("Failed to initialize existing directories", e);
     }
 
     storage = createHistoryStorage();

+ 1 - 1
hadoop-tools/hadoop-streaming/src/main/java/org/apache/hadoop/streaming/StreamJob.java

@@ -154,7 +154,7 @@ public class StreamJob implements Tool {
 
   /**
    * This is the method that actually
-   * intializes the job conf and submits the job
+   * initializes the job conf and submits the job
    * to the jobtracker
    * @throws IOException
    * @deprecated use {@link #run(String[])} instead.
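The same Javadoc deprecates this entry point in favor of run(String[]). A minimal caller sketch of that recommended path through ToolRunner; the input/output/mapper/reducer arguments are placeholders, not values taken from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.streaming.StreamJob;
    import org.apache.hadoop.util.ToolRunner;

    public class StreamJobDriver {
      public static void main(String[] args) throws Exception {
        // Placeholder streaming arguments; real jobs pass their own paths and commands.
        String[] streamArgs = {
            "-input", "/data/in",
            "-output", "/data/out",
            "-mapper", "/bin/cat",
            "-reducer", "/usr/bin/wc"
        };
        // ToolRunner parses the generic Hadoop options and then invokes
        // StreamJob.run(String[]), the non-deprecated path named in the Javadoc.
        int exitCode = ToolRunner.run(new Configuration(), new StreamJob(), streamArgs);
        System.exit(exitCode);
      }
    }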

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/monitor/capacity/QueuePriorityContainerCandidateSelector.java

@@ -117,7 +117,7 @@ public class QueuePriorityContainerCandidateSelector
     return list;
   }
 
-  private void intializePriorityDigraph() {
+  private void initializePriorityDigraph() {
     if (LOG.isDebugEnabled()) {
       LOG.debug("Initializing priority preemption directed graph:");
     }
@@ -383,7 +383,7 @@ public class QueuePriorityContainerCandidateSelector
     // Initialize digraph from queues
     // TODO (wangda): only do this when queue refreshed.
     priorityDigraph.clear();
-    intializePriorityDigraph();
+    initializePriorityDigraph();
 
     // When all queues are set to same priority, or priority is not respected,
     // direct return.

+ 6 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceTrackerService.java

@@ -540,7 +540,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     try {
       nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("A", "B", "C"));
     } catch (IOException e) {
-      Assert.fail("Caught Exception while intializing");
+      Assert.fail("Caught Exception while initializing");
       e.printStackTrace();
     }
 
@@ -589,7 +589,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     try {
       nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("X", "Y", "Z"));
     } catch (IOException e) {
-      Assert.fail("Caught Exception while intializing");
+      Assert.fail("Caught Exception while initializing");
       e.printStackTrace();
     }
 
@@ -642,7 +642,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     try {
       nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("X", "Y", "Z"));
     } catch (IOException e) {
-      Assert.fail("Caught Exception while intializing");
+      Assert.fail("Caught Exception while initializing");
       e.printStackTrace();
     }
 
@@ -694,7 +694,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     try {
       nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("A", "B", "C"));
     } catch (IOException e) {
-      Assert.fail("Caught Exception while intializing");
+      Assert.fail("Caught Exception while initializing");
       e.printStackTrace();
     }
     ResourceTrackerService resourceTrackerService =
@@ -754,7 +754,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     try {
       nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("A", "B", "C"));
     } catch (IOException e) {
-      Assert.fail("Caught Exception while intializing");
+      Assert.fail("Caught Exception while initializing");
       e.printStackTrace();
     }
 
@@ -839,7 +839,7 @@ public class TestResourceTrackerService extends NodeLabelTestBase {
     try {
       nodeLabelsMgr.addToCluserNodeLabelsWithDefaultExclusivity(toSet("A", "B", "C"));
     } catch (IOException e) {
-      Assert.fail("Caught Exception while intializing");
+      Assert.fail("Caught Exception while initializing");
       e.printStackTrace();
     }