
MAPREDUCE-5007. fix coverage org.apache.hadoop.mapreduce.v2.hs (Aleksey Gorshkov via tgraves)

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1465018 13f79535-47bb-0310-9956-ffa450edef68
Thomas Graves 12 years ago
parent
commit
0541571180

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -117,6 +117,9 @@ Release 0.23.7 - UNRELEASED
 
     MAPREDUCE-4991. coverage for gridmix (Aleksey Gorshkov via tgraves)
 
+    MAPREDUCE-5007. fix coverage org.apache.hadoop.mapreduce.v2.hs (Aleksey
+    Gorshkov via tgraves)
+
 Release 0.23.6 - 2013-02-06
 
   INCOMPATIBLE CHANGES

+ 13 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/test/resources/mapred-queues.xml

@@ -1,4 +1,17 @@
 <?xml version="1.0"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
 <!-- This is the template for queue configuration. The format supports nesting of
      queues within queues - a feature called hierarchical queues. All queues are
      defined within the 'queues' tag which is the top level element for this

+ 4 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/main/java/org/apache/hadoop/mapreduce/v2/hs/HistoryFileManager.java

@@ -870,4 +870,8 @@ public class HistoryFileManager extends AbstractService {
       }
     }
   }
+  @VisibleForTesting
+  protected void setMaxHistoryAge(long newValue) {
+    maxHistoryAge = newValue;
+  }
 }
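The new @VisibleForTesting hook is exercised by testDeleteFileInfo in TestJobHistoryParsing below; a minimal usage sketch, assuming an initialized HistoryFileManager named hfm:

    // a negative max age marks every tracked history file as expired
    hfm.setMaxHistoryAge(-1);
    hfm.clean();
    // fileInfo.isDeleted() should now return true for tracked files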

+ 42 - 13
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestCompletedTask.java

@@ -21,46 +21,75 @@ package org.apache.hadoop.mapreduce.v2.hs;
 import java.util.Map;
 import java.util.TreeMap;
 
+import org.apache.hadoop.mapreduce.JobID;
 import org.apache.hadoop.mapreduce.TaskAttemptID;
+import org.apache.hadoop.mapreduce.TaskID;
 import org.apache.hadoop.mapreduce.TaskType;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskAttemptInfo;
 import org.apache.hadoop.mapreduce.jobhistory.JobHistoryParser.TaskInfo;
+import org.apache.hadoop.mapreduce.v2.api.records.Phase;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskReport;
 import org.apache.hadoop.mapreduce.v2.hs.CompletedTask;
-import org.junit.Assert;
 import org.junit.Test;
-import org.mockito.Mockito;
+import static org.mockito.Mockito.*;
+import static org.junit.Assert.*;
 
 public class TestCompletedTask{
 
-  @Test
+  @Test (timeout=5000)
   public void testTaskStartTimes() {
 
-    TaskId taskId = Mockito.mock(TaskId.class); 
-    TaskInfo taskInfo = Mockito.mock(TaskInfo.class);
+    TaskId taskId = mock(TaskId.class);
+    TaskInfo taskInfo = mock(TaskInfo.class);
     Map<TaskAttemptID, TaskAttemptInfo> taskAttempts
       = new TreeMap<TaskAttemptID, TaskAttemptInfo>();
 
     TaskAttemptID id = new TaskAttemptID("0", 0, TaskType.MAP, 0, 0);
-    TaskAttemptInfo info = Mockito.mock(TaskAttemptInfo.class);
-    Mockito.when(info.getAttemptId()).thenReturn(id);
-    Mockito.when(info.getStartTime()).thenReturn(10l);
+    TaskAttemptInfo info = mock(TaskAttemptInfo.class);
+    when(info.getAttemptId()).thenReturn(id);
+    when(info.getStartTime()).thenReturn(10L);
     taskAttempts.put(id, info);
 
     id = new TaskAttemptID("1", 0, TaskType.MAP, 1, 1);
-    info = Mockito.mock(TaskAttemptInfo.class);
-    Mockito.when(info.getAttemptId()).thenReturn(id);
-    Mockito.when(info.getStartTime()).thenReturn(20l);
+    info = mock(TaskAttemptInfo.class);
+    when(info.getAttemptId()).thenReturn(id);
+    when(info.getStartTime()).thenReturn(20L);
     taskAttempts.put(id, info);
 
-    Mockito.when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
+    when(taskInfo.getAllTaskAttempts()).thenReturn(taskAttempts);
     CompletedTask task = new CompletedTask(taskId, taskInfo);
     TaskReport report = task.getReport();
 
     // Make sure the startTime returned by report is the lesser of the
     // attempt launch times
-    Assert.assertTrue(report.getStartTime() == 10);
+    assertTrue(report.getStartTime() == 10);
+  }
+
+  /**
+   * Test some methods of CompletedTaskAttempt.
+   */
+  @Test (timeout=5000)
+  public void testCompletedTaskAttempt(){
+    TaskAttemptInfo attemptInfo = mock(TaskAttemptInfo.class);
+    when(attemptInfo.getRackname()).thenReturn("Rackname");
+    when(attemptInfo.getShuffleFinishTime()).thenReturn(11L);
+    when(attemptInfo.getSortFinishTime()).thenReturn(12L);
+    when(attemptInfo.getShufflePort()).thenReturn(10);
+
+    JobID jobId = new JobID("12345", 0);
+    TaskID taskId = new TaskID(jobId, TaskType.REDUCE, 0);
+    TaskAttemptID taskAttemptId = new TaskAttemptID(taskId, 0);
+    when(attemptInfo.getAttemptId()).thenReturn(taskAttemptId);
+
+    CompletedTaskAttempt taskAttempt = new CompletedTaskAttempt(null, attemptInfo);
+    assertEquals("Rackname", taskAttempt.getNodeRackName());
+    assertEquals(Phase.CLEANUP, taskAttempt.getPhase());
+    assertTrue(taskAttempt.isFinished());
+    assertEquals(11L, taskAttempt.getShuffleFinishTime());
+    assertEquals(12L, taskAttempt.getSortFinishTime());
+    assertEquals(10, taskAttempt.getShufflePort());
   }
 }
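The testTaskStartTimes assertion encodes the expected rule: a completed task's report start time is the minimum start time over all of its attempts. A standalone sketch of that rule (illustration only, not the CompletedTask source):

    // expected start time = min over all attempt start times
    long expected = Long.MAX_VALUE;
    for (TaskAttemptInfo a : taskAttempts.values()) {
      expected = Math.min(expected, a.getStartTime());
    }
    // with attempts starting at 10 and 20, expected == 10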

+ 45 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryEntities.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.mapreduce.v2.hs;
 
 import static junit.framework.Assert.assertEquals;
+import static junit.framework.Assert.assertTrue;
+import static junit.framework.Assert.assertNull;
 
 import java.util.ArrayList;
 import java.util.Collection;
@@ -45,6 +47,7 @@ import org.junit.Test;
 import org.junit.runner.RunWith;
 import org.junit.runners.Parameterized;
 import org.junit.runners.Parameterized.Parameters;
+import org.apache.hadoop.mapred.TaskCompletionEvent;
 
 import static org.mockito.Mockito.*;
 
@@ -79,7 +82,7 @@ public class TestJobHistoryEntities {
   }
 
   /* Verify some expected values based on the history file */
-  @Test (timeout=50000)
+  @Test (timeout=100000)
   public void testCompletedJob() throws Exception {
     HistoryFileInfo info = mock(HistoryFileInfo.class);
     when(info.getConfFile()).thenReturn(fullConfPath);
@@ -168,4 +171,45 @@
     assertEquals(45454, rta1Report.getNodeManagerPort());
     assertEquals(9999, rta1Report.getNodeManagerHttpPort());
   }
+
+  /**
+   * Simple test of some methods of CompletedJob.
+   * @throws Exception
+   */
+  @Test (timeout=30000)
+  public void testGetTaskAttemptCompletionEvent() throws Exception {
+    HistoryFileInfo info = mock(HistoryFileInfo.class);
+    when(info.getConfFile()).thenReturn(fullConfPath);
+    completedJob =
+      new CompletedJob(conf, jobId, fulleHistoryPath, loadTasks, "user",
+          info, jobAclsManager);
+    TaskCompletionEvent[] events = completedJob.getMapAttemptCompletionEvents(0, 1000);
+    assertEquals(10, completedJob.getMapAttemptCompletionEvents(0, 10).length);
+    int currentEventId = 0;
+    for (TaskCompletionEvent taskAttemptCompletionEvent : events) {
+      int eventId = taskAttemptCompletionEvent.getEventId();
+      assertTrue(eventId >= currentEventId);
+      currentEventId = eventId;
+    }
+    assertNull(completedJob.loadConfFile());
+    // job name
+    assertEquals("Sleep job", completedJob.getName());
+    // queue name
+    assertEquals("default", completedJob.getQueueName());
+    // progress
+    assertEquals(1.0, completedJob.getProgress(), 0.001);
+    // 11 rows in the result
+    assertEquals(11, completedJob.getTaskAttemptCompletionEvents(0, 1000).length);
+    // select the first 10 rows
+    assertEquals(10, completedJob.getTaskAttemptCompletionEvents(0, 10).length);
+    // select rows 5-10, including the 5th
+    assertEquals(6, completedJob.getTaskAttemptCompletionEvents(5, 10).length);
+
+    // no errors, so a single empty diagnostic string
+    assertEquals(1, completedJob.getDiagnostics().size());
+    assertEquals("", completedJob.getDiagnostics().get(0));
+
+    assertEquals(0, completedJob.getJobACLs().size());
+  }
+
 }
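The three windowing assertions in testGetTaskAttemptCompletionEvent follow from a simple slice rule: a (fromEventId, maxEvents) query returns at most maxEvents events starting at fromEventId. A sketch of the arithmetic with a hypothetical helper (not part of CompletedJob):

    // number of events a (fromEventId, maxEvents) query should yield
    static int window(int total, int fromEventId, int maxEvents) {
      return Math.max(0, Math.min(total - fromEventId, maxEvents));
    }
    // window(11, 0, 1000) == 11; window(11, 0, 10) == 10; window(11, 5, 10) == 6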

+ 138 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryParsing.java

@@ -19,6 +19,10 @@
 package org.apache.hadoop.mapreduce.v2.hs;
 
 import java.io.ByteArrayOutputStream;
+import static junit.framework.Assert.assertEquals;
+import static org.junit.Assert.assertNull;
+import static org.junit.Assert.assertTrue;
+
 import java.io.IOException;
 import java.io.PrintStream;
 import java.util.Arrays;
@@ -54,6 +58,9 @@ import org.apache.hadoop.mapreduce.v2.api.records.JobState;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
 import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
 import org.apache.hadoop.mapreduce.v2.app.MRApp;
 import org.apache.hadoop.mapreduce.v2.app.job.Job;
 import org.apache.hadoop.mapreduce.v2.app.job.Task;
@@ -65,7 +72,9 @@ import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
 import org.apache.hadoop.mapreduce.v2.jobhistory.FileNameIndexUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobHistoryUtils;
 import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.mapreduce.v2.hs.webapp.dao.JobsInfo;
 import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.yarn.api.records.ContainerId;
 import org.apache.hadoop.yarn.service.Service;
 import org.apache.hadoop.yarn.util.BuilderUtils;
@@ -96,7 +105,7 @@ public class TestJobHistoryParsing {
     info.printAll();
   }
 
-  @Test (timeout=50000)
+  @Test (timeout=300000)
   public void testHistoryParsing() throws Exception {
     LOG.info("STARTING testHistoryParsing()");
     try {
@@ -360,7 +369,7 @@
     return numFinishedMaps;
   }
 
-  @Test (timeout=50000)
+  @Test (timeout=30000)
   public void testHistoryParsingForFailedAttempts() throws Exception {
     LOG.info("STARTING testHistoryParsingForFailedAttempts");
     try {
@@ -427,7 +436,7 @@
     }
   }
 
-  @Test (timeout=50000)
+  @Test (timeout=60000)
   public void testCountersForFailedTask() throws Exception {
     LOG.info("STARTING testCountersForFailedTask");
     try {
@@ -580,4 +589,130 @@
     t.testHistoryParsing();
     t.testHistoryParsingForFailedAttempts();
   }
+
+  /**
+   * Test cleaning of old history files. By default, files should be deleted
+   * after one week.
+   */
+  @Test(timeout = 15000)
+  public void testDeleteFileInfo() throws Exception {
+    LOG.info("STARTING testDeleteFileInfo");
+    try {
+      Configuration conf = new Configuration();
+
+      conf.setClass(
+          CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+          MyResolver.class, DNSToSwitchMapping.class);
+
+      RackResolver.init(conf);
+      MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
+          true);
+      app.submit(conf);
+      Job job = app.getContext().getAllJobs().values().iterator().next();
+      JobId jobId = job.getID();
+
+      app.waitForState(job, JobState.SUCCEEDED);
+
+      // make sure all events are flushed
+      app.waitForState(Service.STATE.STOPPED);
+      HistoryFileManager hfm = new HistoryFileManager();
+      hfm.init(conf);
+      HistoryFileInfo fileInfo = hfm.getFileInfo(jobId);
+      hfm.initExisting();
+      // wait for the history files to move from the done_intermediate
+      // directory to the done directory
+      while (fileInfo.isMovePending()) {
+        Thread.sleep(300);
+      }
+
+      Assert.assertNotNull(hfm.jobListCache.values());
+
+      // try to remove fileInfo
+      hfm.clean();
+      // check that fileInfo has not been deleted
+      Assert.assertFalse(fileInfo.isDeleted());
+      // force expiry with a negative max history age
+      hfm.setMaxHistoryAge(-1);
+      hfm.clean();
+      // now the file should be deleted
+      Assert.assertTrue("file should be deleted ", fileInfo.isDeleted());
+
+    } finally {
+      LOG.info("FINISHED testDeleteFileInfo");
+    }
+  }
+
+  /**
+   * Simple test of some methods of JobHistory.
+   */
+  @Test(timeout = 20000)
+  public void testJobHistoryMethods() throws Exception {
+    LOG.info("STARTING testJobHistoryMethods");
+    try {
+      Configuration configuration = new Configuration();
+      configuration.setClass(
+          CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+          MyResolver.class, DNSToSwitchMapping.class);
+
+      RackResolver.init(configuration);
+      MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
+          true);
+      app.submit(configuration);
+      Job job = app.getContext().getAllJobs().values().iterator().next();
+      JobId jobId = job.getID();
+      LOG.info("JOBID is " + TypeConverter.fromYarn(jobId).toString());
+      app.waitForState(job, JobState.SUCCEEDED);
+
+      JobHistory jobHistory = new JobHistory();
+      jobHistory.init(configuration);
+      // getAllJobs
+      Assert.assertEquals(1, jobHistory.getAllJobs().size());
+      // and the overload that takes an ApplicationId
+      Assert.assertEquals(1, jobHistory.getAllJobs(app.getAppID()).size());
+
+      JobsInfo jobsinfo = jobHistory.getPartialJobs(0L, 10L, null, "default",
+          0L, System.currentTimeMillis() + 1, 0L,
+          System.currentTimeMillis() + 1, JobState.SUCCEEDED);
+
+      Assert.assertEquals(1, jobsinfo.getJobs().size());
+      Assert.assertNotNull(jobHistory.getApplicationAttemptId());
+      // test the application id
+      Assert.assertEquals("application_0_0000", jobHistory.getApplicationID()
+          .toString());
+      Assert.assertEquals("Job History Server", jobHistory.getApplicationName());
+      // not implemented by JobHistory; returns null
+      Assert.assertNull(jobHistory.getEventHandler());
+      // not implemented by JobHistory; returns null
+      Assert.assertNull(jobHistory.getClock());
+      // not implemented by JobHistory; returns null
+      Assert.assertNull(jobHistory.getClusterInfo());
+
+    } finally {
+      LOG.info("FINISHED testJobHistoryMethods");
+    }
+  }
+
+  /**
+   * Simple test of PartialJob.
+   */
+  @Test(timeout = 1000)
+  public void testPartialJob() throws Exception {
+    JobId jobId = new JobIdPBImpl();
+    jobId.setId(0);
+    JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
+        "jobName", jobId, 3, 2, "JobStatus");
+    PartialJob test = new PartialJob(jii, jobId);
+    assertEquals(1.0f, test.getProgress(), 0.001);
+    assertNull(test.getAllCounters());
+    assertNull(test.getTasks());
+    assertNull(test.getTasks(TaskType.MAP));
+    assertNull(test.getTask(new TaskIdPBImpl()));
+
+    assertNull(test.getTaskAttemptCompletionEvents(0, 100));
+    assertNull(test.getMapAttemptCompletionEvents(0, 100));
+    assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
+    assertNull(test.getAMInfos());
+  }
 }
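A note on the getPartialJobs call in testJobHistoryMethods: reading from the call site, the arguments appear to be offset, count, user, queue, a submit-time window, a finish-time window, and a state filter; the labeling below is inferred from the test, not taken from the JobHistory source:

    JobsInfo jobsinfo = jobHistory.getPartialJobs(
        0L, 10L,                             // offset, count
        null, "default",                     // user (any), queue name
        0L, System.currentTimeMillis() + 1,  // submit-time window
        0L, System.currentTimeMillis() + 1,  // finish-time window
        JobState.SUCCEEDED);                 // only successful jobs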

+ 298 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/TestJobHistoryServer.java

@@ -0,0 +1,298 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.mapreduce.v2.hs;
+
+
+import java.util.Map;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
+import org.apache.hadoop.mapreduce.TaskCounter;
+import org.apache.hadoop.mapreduce.v2.api.MRClientProtocol;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.CancelDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetCountersResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDelegationTokenResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetDiagnosticsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetJobReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptCompletionEventsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskAttemptReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.GetTaskReportsResponse;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenRequest;
+import org.apache.hadoop.mapreduce.v2.api.protocolrecords.RenewDelegationTokenResponse;
+import org.apache.hadoop.mapreduce.v2.api.records.JobId;
+import org.apache.hadoop.mapreduce.v2.api.records.JobState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskAttemptId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskId;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskState;
+import org.apache.hadoop.mapreduce.v2.api.records.TaskType;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.JobIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.api.records.impl.pb.TaskIdPBImpl;
+import org.apache.hadoop.mapreduce.v2.app.MRApp;
+import org.apache.hadoop.mapreduce.v2.app.job.Job;
+import org.apache.hadoop.mapreduce.v2.app.job.Task;
+import org.apache.hadoop.mapreduce.v2.app.job.TaskAttempt;
+import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryEvents.MRAppWithHistory;
+import org.apache.hadoop.mapreduce.v2.hs.TestJobHistoryParsing.MyResolver;
+import org.apache.hadoop.mapreduce.v2.jobhistory.JobIndexInfo;
+import org.apache.hadoop.net.DNSToSwitchMapping;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.util.ExitUtil;
+import org.apache.hadoop.yarn.factories.RecordFactory;
+import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
+import org.apache.hadoop.yarn.service.Service;
+import org.apache.hadoop.yarn.service.Service.STATE;
+import org.apache.hadoop.yarn.util.RackResolver;
+import org.junit.After;
+import org.junit.Test;
+
+import static org.junit.Assert.*;
+
+/*
+ * Tests of the JobHistoryServer protocols.
+ */
+public class TestJobHistoryServer {
+  private static RecordFactory recordFactory = RecordFactoryProvider
+          .getRecordFactory(null);
+
+  JobHistoryServer historyServer = null;
+
+  // Simple init/start/stop test of JobHistoryServer; the service state
+  // should change at each step.
+  @Test (timeout = 50000)
+  public void testStartStopServer() throws Exception {
+
+    historyServer = new JobHistoryServer();
+    Configuration config = new Configuration();
+    historyServer.init(config);
+    assertEquals(STATE.INITED, historyServer.getServiceState());
+    assertEquals(3, historyServer.getServices().size());
+    historyServer.start();
+    assertEquals(STATE.STARTED, historyServer.getServiceState());
+    historyServer.stop();
+    assertEquals(STATE.STOPPED, historyServer.getServiceState());
+    assertNotNull(historyServer.getClientService());
+    HistoryClientService historyService = historyServer.getClientService();
+    assertNotNull(historyService.getClientHandler().getConnectAddress());
+
+  }
+
+  /*
+   * Simple test of PartialJob.
+   */
+  @Test
+  public void testPartialJob() throws Exception {
+    JobId jobId = new JobIdPBImpl();
+    jobId.setId(0);
+    JobIndexInfo jii = new JobIndexInfo(0L, System.currentTimeMillis(), "user",
+            "jobName", jobId, 3, 2, "JobStatus");
+    PartialJob test = new PartialJob(jii, jobId);
+    assertEquals(1.0f, test.getProgress(), 0.001);
+    assertNull(test.getAllCounters());
+    assertNull(test.getTasks());
+    assertNull(test.getTasks(TaskType.MAP));
+    assertNull(test.getTask(new TaskIdPBImpl()));
+
+    assertNull(test.getTaskAttemptCompletionEvents(0, 100));
+    assertNull(test.getMapAttemptCompletionEvents(0, 100));
+    assertTrue(test.checkAccess(UserGroupInformation.getCurrentUser(), null));
+    assertNull(test.getAMInfos());
+
+  }
+
+  // Test the report methods of JobHistoryServer. The history server should
+  // pick up the log files written by MRApp and read them.
+  @Test (timeout = 50000)
+  public void testReports() throws Exception {
+    Configuration config = new Configuration();
+    config.setClass(
+        CommonConfigurationKeysPublic.NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY,
+        MyResolver.class, DNSToSwitchMapping.class);
+
+    RackResolver.init(config);
+    MRApp app = new MRAppWithHistory(1, 1, true, this.getClass().getName(),
+            true);
+    app.submit(config);
+    Job job = app.getContext().getAllJobs().values().iterator().next();
+    app.waitForState(job, JobState.SUCCEEDED);
+
+    historyServer = new JobHistoryServer();
+
+    historyServer.init(config);
+    historyServer.start();
+    
+    // find the JobHistory service among the server's services
+    JobHistory jobHistory = null;
+    for (Service service : historyServer.getServices()) {
+      if (service instanceof JobHistory) {
+        jobHistory = (JobHistory) service;
+      }
+    }
+
+    Map<JobId, Job> jobs = jobHistory.getAllJobs();
+    
+    assertEquals(1, jobs.size());
+    assertEquals("job_0_0000",jobs.keySet().iterator().next().toString());
+    
+    
+    Task task = job.getTasks().values().iterator().next();
+    TaskAttempt attempt = task.getAttempts().values().iterator().next();
+
+    HistoryClientService historyService = historyServer.getClientService();
+    MRClientProtocol protocol = historyService.getClientHandler();
+
+    GetTaskAttemptReportRequest gtarRequest = recordFactory
+            .newRecordInstance(GetTaskAttemptReportRequest.class);
+    // test getTaskAttemptReport
+    TaskAttemptId taId = attempt.getID();
+    taId.setTaskId(task.getID());
+    taId.getTaskId().setJobId(job.getID());
+    gtarRequest.setTaskAttemptId(taId);
+    GetTaskAttemptReportResponse response = protocol
+            .getTaskAttemptReport(gtarRequest);
+    assertEquals("container_0_0000_01_000000", response.getTaskAttemptReport()
+            .getContainerId().toString());
+    assertTrue(response.getTaskAttemptReport().getDiagnosticInfo().isEmpty());
+    // counters
+    assertNotNull(response.getTaskAttemptReport().getCounters()
+            .getCounter(TaskCounter.PHYSICAL_MEMORY_BYTES));
+    assertEquals(taId.toString(), response.getTaskAttemptReport()
+            .getTaskAttemptId().toString());
+    // test getTaskReport
+    GetTaskReportRequest request = recordFactory
+            .newRecordInstance(GetTaskReportRequest.class);
+    TaskId taskId = task.getID();
+    taskId.setJobId(job.getID());
+    request.setTaskId(taskId);
+    GetTaskReportResponse reportResponse = protocol.getTaskReport(request);
+    assertEquals("", reportResponse.getTaskReport().getDiagnosticsList()
+            .iterator().next());
+    // progress
+    assertEquals(1.0f, reportResponse.getTaskReport().getProgress(), 0.01);
+    // the report should carry the correct taskId
+    assertEquals(taskId.toString(), reportResponse.getTaskReport().getTaskId()
+            .toString());
+    // Task state should be SUCCEEDED
+    assertEquals(TaskState.SUCCEEDED, reportResponse.getTaskReport()
+            .getTaskState());
+    // test getTaskAttemptCompletionEvents
+    GetTaskAttemptCompletionEventsRequest taskAttemptRequest = recordFactory
+            .newRecordInstance(GetTaskAttemptCompletionEventsRequest.class);
+    taskAttemptRequest.setJobId(job.getID());
+    GetTaskAttemptCompletionEventsResponse taskAttemptCompletionEventsResponse = protocol
+            .getTaskAttemptCompletionEvents(taskAttemptRequest);
+    assertEquals(0, taskAttemptCompletionEventsResponse.getCompletionEventCount());
+    
+    // test getDiagnostics
+    GetDiagnosticsRequest diagnosticRequest = recordFactory
+            .newRecordInstance(GetDiagnosticsRequest.class);
+    diagnosticRequest.setTaskAttemptId(taId);
+    GetDiagnosticsResponse diagnosticResponse = protocol
+            .getDiagnostics(diagnosticRequest);
+    // with no failures, the diagnostics list holds a single empty string
+    assertEquals(1, diagnosticResponse.getDiagnosticsCount());
+    assertEquals("", diagnosticResponse.getDiagnostics(0));
+
+    GetCountersRequest counterRequest = recordFactory
+            .newRecordInstance(GetCountersRequest.class);
+    counterRequest.setJobId(job.getID());
+    GetCountersResponse counterResponse = protocol.getCounters(counterRequest);
+    assertNotNull(counterResponse.getCounters().getCounterGroup("org.apache.hadoop.mapreduce.JobCounter"));
+    // test getJobReport
+    GetJobReportRequest reportRequest = recordFactory
+            .newRecordInstance(GetJobReportRequest.class);
+    reportRequest.setJobId(job.getID());
+    GetJobReportResponse jobReport = protocol.getJobReport(reportRequest);
+    assertEquals(1, jobReport.getJobReport().getAMInfos().size());
+    assertNotNull(jobReport.getJobReport().getJobFile());
+    assertEquals(job.getID().toString(), jobReport.getJobReport().getJobId().toString());
+    assertNotNull(jobReport.getJobReport().getTrackingUrl());
+
+
+    // getTaskReports
+    GetTaskReportsRequest taskReportRequest = recordFactory
+            .newRecordInstance(GetTaskReportsRequest.class);
+    taskReportRequest.setJobId(job.getID());
+    taskReportRequest.setTaskType(TaskType.MAP);
+    GetTaskReportsResponse taskReportsResponse = protocol.getTaskReports(taskReportRequest);
+    assertEquals(1, taskReportsResponse.getTaskReportList().size());
+    assertEquals(1, taskReportsResponse.getTaskReportCount());
+    assertEquals(task.getID(), taskReportsResponse.getTaskReport(0).getTaskId());
+    assertEquals(TaskState.SUCCEEDED, taskReportsResponse.getTaskReport(0).getTaskState());
+
+    // getDelegationToken
+    GetDelegationTokenRequest delegationTokenRequest = recordFactory
+            .newRecordInstance(GetDelegationTokenRequest.class);
+    String s = UserGroupInformation.getCurrentUser().getShortUserName();
+    delegationTokenRequest.setRenewer(s);
+    GetDelegationTokenResponse delegationTokenResponse = protocol.getDelegationToken(delegationTokenRequest);
+    assertEquals("MR_DELEGATION_TOKEN", delegationTokenResponse.getDelegationToken().getKind());
+    assertNotNull(delegationTokenResponse.getDelegationToken().getIdentifier());
+
+    // renewDelegationToken
+
+    RenewDelegationTokenRequest renewDelegationRequest = recordFactory
+            .newRecordInstance(RenewDelegationTokenRequest.class);
+    renewDelegationRequest.setDelegationToken(delegationTokenResponse.getDelegationToken());
+    RenewDelegationTokenResponse renewDelegationTokenResponse = protocol.renewDelegationToken(renewDelegationRequest);
+    // the renewed expiration time should be in the future (about one day by default)
+    assertTrue(renewDelegationTokenResponse.getNextExpirationTime() > 0);
+
+
+    // cancelDelegationToken
+
+    CancelDelegationTokenRequest cancelDelegationTokenRequest = recordFactory
+            .newRecordInstance(CancelDelegationTokenRequest.class);
+    cancelDelegationTokenRequest.setDelegationToken(delegationTokenResponse.getDelegationToken());
+    assertNotNull(protocol.cancelDelegationToken(cancelDelegationTokenRequest));
+
+    historyServer.stop();
+  }
+  // test the main method
+  @Test (timeout = 60000)
+  public void testMainMethod() throws Exception {
+
+    ExitUtil.disableSystemExit();
+    try {
+      JobHistoryServer.main(new String[0]);
+
+    } catch (ExitUtil.ExitException e) {
+      assertEquals(0, e.status);
+      ExitUtil.resetFirstExitException();
+      fail();
+    }
+  }
+  
+  @After
+  public void stop() {
+    if (historyServer != null
+        && !STATE.STOPPED.equals(historyServer.getServiceState())) {
+      historyServer.stop();
+    }
+  }
+}
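testMainMethod relies on the ExitUtil pattern: disableSystemExit() makes Hadoop's exit paths throw ExitUtil.ExitException instead of terminating the JVM, so a test can observe the exit status. A minimal sketch of the pattern on its own, independent of JobHistoryServer:

    ExitUtil.disableSystemExit();          // System.exit calls now throw
    try {
      ExitUtil.terminate(0);               // would normally exit the JVM
    } catch (ExitUtil.ExitException e) {
      assertEquals(0, e.status);           // the exit code is preserved
      ExitUtil.resetFirstExitException();  // reset state for later tests
    }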