
YARN-629. Make YarnRemoteException not be rooted at IOException. Contributed by Xuan Gong.
MAPREDUCE-5204. Handling YarnRemoteException separately from IOException in MR app after YARN-629. Contributed by Xuan Gong.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1479680 13f79535-47bb-0310-9956-ffa450edef68

Vinod Kumar Vavilapalli, 12 years ago
Parent
Current commit
92b7165a71
39 changed files, with 284 insertions and 131 deletions (the per-file counts below list insertions, then deletions)
  1. 3 0
      hadoop-mapreduce-project/CHANGES.txt
  2. 5 0
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java
  3. 2 2
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java
  4. 66 21
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java
  5. 43 19
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java
  6. 10 7
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java
  7. 5 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java
  8. 2 3
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java
  9. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java
  10. 2 1
      hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java
  11. 3 0
      hadoop-yarn-project/CHANGES.txt
  12. 1 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRemoteException.java
  13. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java
  14. 1 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java
  15. 9 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/RMAdmin.java
  16. 20 4
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java
  17. 5 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java
  18. 1 0
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java
  19. 7 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java
  20. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java
  21. 3 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java
  22. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java
  23. 4 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java
  24. 10 6
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java
  25. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
  26. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java
  27. 5 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java
  28. 7 5
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java
  29. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java
  30. 14 9
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java
  31. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java
  32. 5 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java
  33. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java
  34. 2 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java
  35. 3 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java
  36. 16 13
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java
  37. 2 2
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java
  38. 5 3
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
  39. 5 1
      hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java

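The commit pairs two changes: YARN-629 re-roots YarnRemoteException at java.lang.Exception, and MAPREDUCE-5204 updates the MR client so every call site that used to let the exception propagate as an IOException now catches it and re-wraps it. A minimal sketch of that wrap-and-rethrow pattern follows; the helper class and interface names are illustrative only and not part of this patch.

import java.io.IOException;

import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

// Hypothetical helper (not in the patch) showing the translation the MR client
// now performs: YarnRemoteException no longer extends IOException, so it is
// caught explicitly and re-wrapped to keep the existing IOException-based
// method signatures working.
public class YarnCallWrapper {

  /** Stand-in for any YARN client call that throws YarnRemoteException. */
  interface YarnCall<T> {
    T invoke() throws YarnRemoteException;
  }

  static <T> T callViaIOException(YarnCall<T> call) throws IOException {
    try {
      return call.invoke();
    } catch (YarnRemoteException e) {
      // Same translation the patch adds in ResourceMgrDelegate, YARNRunner,
      // MRDelegationTokenRenewer and RMDelegationTokenIdentifier.
      throw new IOException(e);
    }
  }
}

On the test side the same shift shows up as assertions that catch IOException instead of YarnRemoteException and match on the wrapped message, as in TestClientServiceDelegate and TestMRJobClient below.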
+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -377,6 +377,9 @@ Release 2.0.5-beta - UNRELEASED
 
     MAPREDUCE-5205. Fixed MR App to load tokens correctly. (vinodkv)
 
+    MAPREDUCE-5204. Handling YarnRemoteException separately from IOException in
+    MR app after YARN-629. (Xuan Gong via vinodkv)
+
 Release 2.0.4-alpha - UNRELEASED
 
   INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/src/main/java/org/apache/hadoop/mapreduce/v2/security/MRDelegationTokenRenewer.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenRenewer;
 import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
@@ -67,6 +68,8 @@ public class MRDelegationTokenRenewer extends TokenRenewer {
           .newRecord(RenewDelegationTokenRequest.class);
       request.setDelegationToken(dToken);
       return histProxy.renewDelegationToken(request).getNextExpirationTime();
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
     } finally {
       stopHistoryProxy(histProxy);
     }
@@ -88,6 +91,8 @@ public class MRDelegationTokenRenewer extends TokenRenewer {
           .newRecord(CancelDelegationTokenRequest.class);
       request.setDelegationToken(dToken);
       histProxy.cancelDelegationToken(request);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
     } finally {
       stopHistoryProxy(histProxy);
     }

+ 2 - 2
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ClientServiceDelegate.java

@@ -302,13 +302,13 @@ public class ClientServiceDelegate {
         return methodOb.invoke(getProxy(), args);
       } catch (YarnRemoteException yre) {
         LOG.warn("Exception thrown by remote end.", yre);
-        throw yre;
+        throw new IOException(yre);
       } catch (InvocationTargetException e) {
         if (e.getTargetException() instanceof YarnRemoteException) {
           LOG.warn("Error from remote end: " + e
               .getTargetException().getLocalizedMessage());
           LOG.debug("Tracing remote error ", e.getTargetException());
-          throw (YarnRemoteException) e.getTargetException();
+          throw new IOException(e.getTargetException());
         }
         LOG.debug("Failed to contact AM/History for job " + jobId + 
             " retrying..", e.getTargetException());

+ 66 - 21
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/ResourceMgrDelegate.java

@@ -43,6 +43,7 @@ import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.YarnClusterMetrics;
 import org.apache.hadoop.yarn.client.YarnClientImpl;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.util.ProtoUtils;
 
 public class ResourceMgrDelegate extends YarnClientImpl {
@@ -65,11 +66,19 @@ public class ResourceMgrDelegate extends YarnClientImpl {
 
   public TaskTrackerInfo[] getActiveTrackers() throws IOException,
       InterruptedException {
-    return TypeConverter.fromYarnNodes(super.getNodeReports());
+    try {
+      return TypeConverter.fromYarnNodes(super.getNodeReports());
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public JobStatus[] getAllJobs() throws IOException, InterruptedException {
-    return TypeConverter.fromYarnApps(super.getApplicationList(), this.conf);
+    try {
+      return TypeConverter.fromYarnApps(super.getApplicationList(), this.conf);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
   public TaskTrackerInfo[] getBlacklistedTrackers() throws IOException,
@@ -81,11 +90,17 @@ public class ResourceMgrDelegate extends YarnClientImpl {
 
   public ClusterMetrics getClusterMetrics() throws IOException,
       InterruptedException {
-    YarnClusterMetrics metrics = super.getYarnClusterMetrics();
-    ClusterMetrics oldMetrics = new ClusterMetrics(1, 1, 1, 1, 1, 1, 
-        metrics.getNumNodeManagers() * 10, metrics.getNumNodeManagers() * 2, 1,
-        metrics.getNumNodeManagers(), 0, 0);
-    return oldMetrics;
+    try {
+      YarnClusterMetrics metrics = super.getYarnClusterMetrics();
+      ClusterMetrics oldMetrics =
+          new ClusterMetrics(1, 1, 1, 1, 1, 1,
+              metrics.getNumNodeManagers() * 10,
+              metrics.getNumNodeManagers() * 2, 1,
+              metrics.getNumNodeManagers(), 0, 0);
+      return oldMetrics;
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   InetSocketAddress getConnectAddress() {
@@ -95,8 +110,12 @@ public class ResourceMgrDelegate extends YarnClientImpl {
   @SuppressWarnings("rawtypes")
   public Token getDelegationToken(Text renewer) throws IOException,
       InterruptedException {
-    return ProtoUtils.convertFromProtoFormat(
-      super.getRMDelegationToken(renewer), rmAddress);
+    try {
+      return ProtoUtils.convertFromProtoFormat(
+        super.getRMDelegationToken(renewer), rmAddress);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public String getFilesystemName() throws IOException, InterruptedException {
@@ -104,36 +123,62 @@ public class ResourceMgrDelegate extends YarnClientImpl {
   }
 
   public JobID getNewJobID() throws IOException, InterruptedException {
-    this.application = super.getNewApplication();
-    this.applicationId = this.application.getApplicationId();
-    return TypeConverter.fromYarn(applicationId);
+    try {
+      this.application = super.getNewApplication();
+      this.applicationId = this.application.getApplicationId();
+      return TypeConverter.fromYarn(applicationId);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public QueueInfo getQueue(String queueName) throws IOException,
   InterruptedException {
-    org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
-        super.getQueueInfo(queueName);
-    return (queueInfo == null) ? null : TypeConverter.fromYarn(queueInfo, conf);
+    try {
+      org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =
+          super.getQueueInfo(queueName);
+      return (queueInfo == null) ? null : TypeConverter.fromYarn(queueInfo,
+          conf);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public QueueAclsInfo[] getQueueAclsForCurrentUser() throws IOException,
       InterruptedException {
-    return TypeConverter.fromYarnQueueUserAclsInfo(super
-      .getQueueAclsInfo());
+    try {
+      return TypeConverter.fromYarnQueueUserAclsInfo(super
+        .getQueueAclsInfo());
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public QueueInfo[] getQueues() throws IOException, InterruptedException {
-    return TypeConverter.fromYarnQueueInfo(super.getAllQueues(), this.conf);
+    try {
+      return TypeConverter.fromYarnQueueInfo(super.getAllQueues(), this.conf);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public QueueInfo[] getRootQueues() throws IOException, InterruptedException {
-    return TypeConverter.fromYarnQueueInfo(super.getRootQueueInfos(), this.conf);
+    try {
+      return TypeConverter.fromYarnQueueInfo(super.getRootQueueInfos(),
+          this.conf);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public QueueInfo[] getChildQueues(String parent) throws IOException,
       InterruptedException {
-    return TypeConverter.fromYarnQueueInfo(super.getChildQueueInfos(parent),
-      this.conf);
+    try {
+      return TypeConverter.fromYarnQueueInfo(super.getChildQueueInfos(parent),
+        this.conf);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   public String getStagingAreaDir() throws IOException, InterruptedException {

+ 43 - 19
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/main/java/org/apache/hadoop/mapred/YARNRunner.java

@@ -80,6 +80,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.security.client.RMTokenSelector;
@@ -207,10 +208,15 @@ public class YARNRunner implements ClientProtocol {
     GetDelegationTokenRequest request = recordFactory
       .newRecordInstance(GetDelegationTokenRequest.class);
     request.setRenewer(Master.getMasterPrincipal(conf));
-    DelegationToken mrDelegationToken = hsProxy.getDelegationToken(request)
-      .getDelegationToken();
-    return ProtoUtils.convertFromProtoFormat(mrDelegationToken,
-                                             hsProxy.getConnectAddress());
+    DelegationToken mrDelegationToken;
+    try {
+      mrDelegationToken = hsProxy.getDelegationToken(request)
+        .getDelegationToken();
+      return ProtoUtils.convertFromProtoFormat(mrDelegationToken,
+          hsProxy.getConnectAddress());
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   @Override
@@ -295,19 +301,25 @@ public class YARNRunner implements ClientProtocol {
       createApplicationSubmissionContext(conf, jobSubmitDir, ts);
 
     // Submit to ResourceManager
-    ApplicationId applicationId = resMgrDelegate.submitApplication(appContext);
-
-    ApplicationReport appMaster = resMgrDelegate
-        .getApplicationReport(applicationId);
-    String diagnostics =
-        (appMaster == null ?
-            "application report is null" : appMaster.getDiagnostics());
-    if (appMaster == null || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
-        || appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
-      throw new IOException("Failed to run job : " +
-        diagnostics);
+    try {
+      ApplicationId applicationId =
+          resMgrDelegate.submitApplication(appContext);
+
+      ApplicationReport appMaster = resMgrDelegate
+          .getApplicationReport(applicationId);
+      String diagnostics =
+          (appMaster == null ?
+              "application report is null" : appMaster.getDiagnostics());
+      if (appMaster == null
+          || appMaster.getYarnApplicationState() == YarnApplicationState.FAILED
+          || appMaster.getYarnApplicationState() == YarnApplicationState.KILLED) {
+        throw new IOException("Failed to run job : " +
+            diagnostics);
+      }
+      return clientCache.getClient(jobId).getJobStatus(jobId);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
     }
-    return clientCache.getClient(jobId).getJobStatus(jobId);
   }
 
   private LocalResource createApplicationResource(FileContext fs, Path p, LocalResourceType type)
@@ -552,7 +564,11 @@ public class YARNRunner implements ClientProtocol {
     /* check if the status is not running, if not send kill to RM */
     JobStatus status = clientCache.getClient(arg0).getJobStatus(arg0);
     if (status.getState() != JobStatus.State.RUNNING) {
-      resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+      try {
+        resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+      } catch (YarnRemoteException e) {
+        throw new IOException(e);
+      }
       return;
     }
 
@@ -576,7 +592,11 @@ public class YARNRunner implements ClientProtocol {
       LOG.debug("Error when checking for application status", io);
     }
     if (status.getState() != JobStatus.State.KILLED) {
-      resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+      try {
+        resMgrDelegate.killApplication(TypeConverter.toYarn(arg0).getAppId());
+      } catch (YarnRemoteException e) {
+        throw new IOException(e);
+      }
     }
   }
 
@@ -607,7 +627,11 @@ public class YARNRunner implements ClientProtocol {
   @Override
   public LogParams getLogFileParams(JobID jobID, TaskAttemptID taskAttemptID)
       throws IOException {
-    return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
+    try {
+      return clientCache.getClient(jobID).getLogFilePath(jobID, taskAttemptID);
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
+    }
   }
 
   private static void warnForJavaLibPath(String opts, String component, 

+ 10 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestClientServiceDelegate.java

@@ -115,8 +115,9 @@ public class TestClientServiceDelegate {
     try {
       clientServiceDelegate.getJobStatus(oldJobId);
       Assert.fail("Invoke should throw exception after retries.");
-    } catch (YarnRemoteException e) {
-      Assert.assertEquals("Job ID doesnot Exist", e.getMessage());
+    } catch (IOException e) {
+      Assert.assertTrue(e.getMessage().contains(
+          "Job ID doesnot Exist"));
     }
   }
 
@@ -198,7 +199,8 @@ public class TestClientServiceDelegate {
   }
 
   @Test
-  public void testReconnectOnAMRestart() throws IOException {
+  public void testReconnectOnAMRestart() throws IOException,
+      YarnRemoteException {
     //test not applicable when AM not reachable
     //as instantiateAMProxy is not called at all
     if(!isAMReachableFromClient) {
@@ -265,7 +267,7 @@ public class TestClientServiceDelegate {
   }
   
   @Test
-  public void testAMAccessDisabled() throws IOException {
+  public void testAMAccessDisabled() throws IOException, YarnRemoteException {
     //test only applicable when AM not reachable
     if(isAMReachableFromClient) {
       return;
@@ -317,7 +319,8 @@ public class TestClientServiceDelegate {
   }
   
   @Test
-  public void testRMDownForJobStatusBeforeGetAMReport() throws IOException {
+  public void testRMDownForJobStatusBeforeGetAMReport() throws IOException,
+      YarnRemoteException {
     Configuration conf = new YarnConfiguration();
     testRMDownForJobStatusBeforeGetAMReport(conf,
         MRJobConfig.DEFAULT_MR_CLIENT_MAX_RETRIES);
@@ -325,7 +328,7 @@ public class TestClientServiceDelegate {
 
   @Test
   public void testRMDownForJobStatusBeforeGetAMReportWithRetryTimes()
-      throws IOException {
+      throws IOException, YarnRemoteException {
     Configuration conf = new YarnConfiguration();
     conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 2);
     testRMDownForJobStatusBeforeGetAMReport(conf, conf.getInt(
@@ -335,7 +338,7 @@ public class TestClientServiceDelegate {
   
   @Test
   public void testRMDownRestoreForJobStatusBeforeGetAMReport()
-      throws IOException {
+      throws IOException, YarnRemoteException {
     Configuration conf = new YarnConfiguration();
     conf.setInt(MRJobConfig.MR_CLIENT_MAX_RETRIES, 3);
 

+ 5 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/TestResourceMgrDelegate.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.FinalApplicationStatus;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.YarnApplicationState;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.util.Records;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
@@ -47,9 +48,12 @@ public class TestResourceMgrDelegate {
 
   /**
    * Tests that getRootQueues makes a request for the (recursive) child queues
+   * @throws YarnRemoteException
+   * @throws IOException
    */
   @Test
-  public void testGetRootQueues() throws IOException, InterruptedException {
+  public void testGetRootQueues() throws IOException, InterruptedException,
+      YarnRemoteException {
     final ClientRMProtocol applicationsManager = Mockito.mock(ClientRMProtocol.class);
     GetQueueInfoResponse response = Mockito.mock(GetQueueInfoResponse.class);
     org.apache.hadoop.yarn.api.records.QueueInfo queueInfo =

+ 2 - 3
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/TestMRJobClient.java

@@ -40,7 +40,6 @@ import org.apache.hadoop.mapreduce.tools.CLI;
 import org.apache.hadoop.util.ExitUtil;
 import org.apache.hadoop.util.Tool;
 import org.apache.hadoop.util.ToolRunner;
-import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 
 /**
  test CLI class. CLI class implemented  the Tool interface. 
@@ -155,7 +154,7 @@ public class TestMRJobClient extends ClusterMapReduceTestCase {
     try {
       runTool(conf, jc, new String[] { "-fail-task", taid.toString() }, out);
       fail(" this task should field");
-    } catch (YarnRemoteException e) {
+    } catch (IOException e) {
       // task completed !
       assertTrue(e.getMessage().contains("_0001_m_000000_1"));
     }
@@ -175,7 +174,7 @@ public class TestMRJobClient extends ClusterMapReduceTestCase {
     try {
       runTool(conf, jc, new String[] { "-kill-task", taid.toString() }, out);
       fail(" this task should be killed");
-    } catch (YarnRemoteException e) {
+    } catch (IOException e) {
       // task completed
       assertTrue(e.getMessage().contains("_0001_m_000000_1"));
     }

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/security/TestJHSSecurity.java

@@ -62,7 +62,8 @@ public class TestJHSSecurity {
   private static final Log LOG = LogFactory.getLog(TestJHSSecurity.class);
   
   @Test
-  public void testDelegationToken() throws IOException, InterruptedException {
+  public void testDelegationToken() throws IOException, InterruptedException,
+      YarnRemoteException {
 
     Logger rootLogger = LogManager.getRootLogger();
     rootLogger.setLevel(Level.DEBUG);

+ 2 - 1
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/v2/TestMRJobsWithHistoryService.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationAttemptId;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ContainerId;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.server.resourcemanager.rmapp.RMAppState;
 import org.apache.hadoop.yarn.util.BuilderUtils;
@@ -114,7 +115,7 @@ public class TestMRJobsWithHistoryService {
 
   @Test (timeout = 30000)
   public void testJobHistoryData() throws IOException, InterruptedException,
-      AvroRemoteException, ClassNotFoundException {
+      AvroRemoteException, ClassNotFoundException, YarnRemoteException {
     if (!(new File(MiniMRYarnCluster.APPJAR)).exists()) {
       LOG.info("MRAppJar " + MiniMRYarnCluster.APPJAR
           + " not found. Not running test.");

+ 3 - 0
hadoop-yarn-project/CHANGES.txt

@@ -106,6 +106,9 @@ Release 2.0.5-beta - UNRELEASED
     favour of the copy present in the container token field. 
     (Vinod Kumar Vavilapalli via sseth)
 
+    YARN-629. Make YarnRemoteException not be rooted at IOException. (Xuan Gong
+    via vinodkv)
+
   NEW FEATURES
 
     YARN-482. FS: Extend SchedulingMode to intermediate queues. 

+ 1 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/exceptions/YarnRemoteException.java

@@ -18,11 +18,10 @@
 
 package org.apache.hadoop.yarn.exceptions;
 
-import java.io.IOException;
 import java.io.PrintStream;
 import java.io.PrintWriter;
 
-public abstract class YarnRemoteException extends IOException {
+public abstract class YarnRemoteException extends Exception {
   private static final long serialVersionUID = 1L;
   
   public YarnRemoteException() {

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/src/main/java/org/apache/hadoop/yarn/applications/distributedshell/Client.java

@@ -312,8 +312,9 @@ public class Client extends YarnClientImpl {
    * Main run function for the client
    * @return true if application completed successfully
    * @throws IOException
+   * @throws YarnRemoteException
    */
-  public boolean run() throws IOException {
+  public boolean run() throws IOException, YarnRemoteException {
 
     LOG.info("Running Client");
     start();

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/src/main/java/org/apache/hadoop/yarn/applications/unmanagedamlauncher/UnmanagedAMLauncher.java

@@ -271,7 +271,7 @@ public class UnmanagedAMLauncher {
     amProc.destroy();
   }
   
-  public boolean run() throws IOException {
+  public boolean run() throws IOException, YarnRemoteException {
     LOG.info("Starting Client");
     
     // Connect to ResourceManager

+ 9 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/main/java/org/apache/hadoop/yarn/client/RMAdmin.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.protocolrecords.RefreshServiceAclsRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RefreshSuperUserGroupsConfigurationRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RefreshUserToGroupsMappingsRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -187,7 +188,7 @@ public class RMAdmin extends Configured implements Tool {
     return adminProtocol;
   }
   
-  private int refreshQueues() throws IOException {
+  private int refreshQueues() throws IOException, YarnRemoteException {
     // Refresh the queue properties
     RMAdminProtocol adminProtocol = createAdminProtocol();
     RefreshQueuesRequest request = 
@@ -196,7 +197,7 @@ public class RMAdmin extends Configured implements Tool {
     return 0;
   }
 
-  private int refreshNodes() throws IOException {
+  private int refreshNodes() throws IOException, YarnRemoteException {
     // Refresh the nodes
     RMAdminProtocol adminProtocol = createAdminProtocol();
     RefreshNodesRequest request = 
@@ -205,7 +206,8 @@ public class RMAdmin extends Configured implements Tool {
     return 0;
   }
   
-  private int refreshUserToGroupsMappings() throws IOException {
+  private int refreshUserToGroupsMappings() throws IOException,
+      YarnRemoteException {
     // Refresh the user-to-groups mappings
     RMAdminProtocol adminProtocol = createAdminProtocol();
     RefreshUserToGroupsMappingsRequest request = 
@@ -214,7 +216,8 @@ public class RMAdmin extends Configured implements Tool {
     return 0;
   }
   
-  private int refreshSuperUserGroupsConfiguration() throws IOException {
+  private int refreshSuperUserGroupsConfiguration() throws IOException,
+      YarnRemoteException {
     // Refresh the super-user groups
     RMAdminProtocol adminProtocol = createAdminProtocol();
     RefreshSuperUserGroupsConfigurationRequest request = 
@@ -223,7 +226,7 @@ public class RMAdmin extends Configured implements Tool {
     return 0;
   }
   
-  private int refreshAdminAcls() throws IOException {
+  private int refreshAdminAcls() throws IOException, YarnRemoteException {
     // Refresh the admin acls
     RMAdminProtocol adminProtocol = createAdminProtocol();
     RefreshAdminAclsRequest request = 
@@ -232,7 +235,7 @@ public class RMAdmin extends Configured implements Tool {
     return 0;
   }
   
-  private int refreshServiceAcls() throws IOException {
+  private int refreshServiceAcls() throws IOException, YarnRemoteException {
     // Refresh the service acls
     RMAdminProtocol adminProtocol = createAdminProtocol();
     RefreshServiceAclsRequest request = 

+ 20 - 4
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/ipc/RPCUtil.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.yarn.ipc;
 
 import java.io.IOException;
+import java.lang.reflect.Constructor;
 import java.lang.reflect.UndeclaredThrowableException;
 
 import org.apache.hadoop.conf.Configuration;
@@ -71,12 +72,27 @@ public class RPCUtil {
       throws UndeclaredThrowableException {
     if (se.getCause() instanceof RemoteException) {
       try {
-        throw ((RemoteException) se.getCause())
-            .unwrapRemoteException(YarnRemoteExceptionPBImpl.class);
-      } catch (YarnRemoteException ex) {
-        return ex;
+        RemoteException re = (RemoteException) se.getCause();
+        Class<?> realClass = Class.forName(re.getClassName());
+        //YarnRemoteException is not rooted as IOException.
+        //Do the explicitly check if it is YarnRemoteException
+        if (YarnRemoteException.class.isAssignableFrom(realClass)) {
+          Constructor<? extends YarnRemoteException> cn =
+              realClass.asSubclass(YarnRemoteException.class).getConstructor(
+                  String.class);
+          cn.setAccessible(true);
+          YarnRemoteException ex = cn.newInstance(re.getMessage());
+          ex.initCause(re);
+          return ex;
+        } else {
+          throw ((RemoteException) se.getCause())
+              .unwrapRemoteException(YarnRemoteExceptionPBImpl.class);
+        }
       } catch (IOException e1) {
         throw new UndeclaredThrowableException(e1);
+      } catch (Exception ex) {
+        throw new UndeclaredThrowableException(
+            (RemoteException) se.getCause());
       }
     } else if (se.getCause() instanceof YarnRemoteException) {
       return (YarnRemoteException) se.getCause();

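For context on the RPCUtil hunk above: RemoteException#unwrapRemoteException returns an IOException, so once YarnRemoteException is no longer rooted at IOException the concrete exception has to be rebuilt by hand from the class name and message carried inside the RemoteException. A rough, self-contained sketch of that reconstruction step; the class and method names here are illustrative, not the actual RPCUtil code:

import java.lang.reflect.Constructor;

import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.yarn.exceptions.YarnRemoteException;

// Illustrative sketch of the reflective rebuild RPCUtil now performs for
// exceptions whose concrete class derives from YarnRemoteException.
public final class YarnRemoteExceptionRebuilder {

  static YarnRemoteException rebuild(RemoteException re) throws Exception {
    // The RemoteException carries the class name of the server-side exception.
    Class<?> realClass = Class.forName(re.getClassName());
    if (!YarnRemoteException.class.isAssignableFrom(realClass)) {
      throw re; // not a YARN exception; the normal unwrap path handles it
    }
    // Rebuild the concrete exception from its (String) constructor and keep
    // the RemoteException as the cause, mirroring the new RPCUtil code path.
    Constructor<? extends YarnRemoteException> cn =
        realClass.asSubclass(YarnRemoteException.class)
            .getConstructor(String.class);
    cn.setAccessible(true);
    YarnRemoteException ex = cn.newInstance(re.getMessage());
    ex.initCause(re);
    return ex;
  }
}

Exceptions whose class is not a YarnRemoteException subclass still go through the original unwrapRemoteException path, as the hunk above shows.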
+ 5 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/security/client/RMDelegationTokenIdentifier.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.ClientRMProtocol;
 import org.apache.hadoop.yarn.api.protocolrecords.CancelDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.RenewDelegationTokenRequest;
 import org.apache.hadoop.yarn.api.records.DelegationToken;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
 import org.apache.hadoop.yarn.util.BuilderUtils;
 import org.apache.hadoop.yarn.util.Records;
@@ -105,6 +106,8 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifi
               Records.newRecord(RenewDelegationTokenRequest.class);
           request.setDelegationToken(convertToProtoToken(token));
           return rmClient.renewDelegationToken(request).getNextExpirationTime();
+        } catch (YarnRemoteException e) {
+          throw new IOException(e);
         } finally {
           RPC.stopProxy(rmClient);
         }
@@ -125,6 +128,8 @@ public class RMDelegationTokenIdentifier extends AbstractDelegationTokenIdentifi
               Records.newRecord(CancelDelegationTokenRequest.class);
           request.setDelegationToken(convertToProtoToken(token));
           rmClient.cancelDelegationToken(request);
+        } catch (YarnRemoteException e) {
+          throw new IOException(e);
         } finally {
           RPC.stopProxy(rmClient);
         }

+ 1 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/ContainerManagerImpl.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.StringUtils;
+import org.apache.hadoop.yarn.YarnException;
 import org.apache.hadoop.yarn.api.ContainerManager;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusRequest;
 import org.apache.hadoop.yarn.api.protocolrecords.GetContainerStatusResponse;

+ 7 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestContainerManagerWithLCE.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnsupportedFileSystemException;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.TestContainerManager;
 import org.junit.After;
 
@@ -73,7 +74,8 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
   }
 
   @Override
-  public void testContainerSetup() throws IOException, InterruptedException {
+  public void testContainerSetup() throws IOException, InterruptedException,
+      YarnRemoteException {
     // Don't run the test if the binary is not available.
     if (!shouldRunTest()) {
       LOG.info("LCE binary path is not passed. Not running the test");
@@ -96,7 +98,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
 
   @Override
   public void testContainerLaunchAndStop() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
     // Don't run the test if the binary is not available.
     if (!shouldRunTest()) {
       LOG.info("LCE binary path is not passed. Not running the test");
@@ -108,7 +110,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
   
   @Override
   public void testContainerLaunchAndExitSuccess() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
     // Don't run the test if the binary is not available.
     if (!shouldRunTest()) {
       LOG.info("LCE binary path is not passed. Not running the test");
@@ -120,7 +122,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
 
   @Override
   public void testContainerLaunchAndExitFailure() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
     // Don't run the test if the binary is not available.
     if (!shouldRunTest()) {
       LOG.info("LCE binary path is not passed. Not running the test");
@@ -132,7 +134,7 @@ public class TestContainerManagerWithLCE extends TestContainerManager {
   
   @Override
   public void testLocalFilesCleanup() throws InterruptedException,
-      IOException {
+      IOException, YarnRemoteException {
     // Don't run the test if the binary is not available.
     if (!shouldRunTest()) {
       LOG.info("LCE binary path is not passed. Not running the test");

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestEventFlow.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.ResourceTracker;
@@ -62,7 +63,7 @@ public class TestEventFlow {
 
   @Test
   public void testSuccessfulContainerLaunch() throws InterruptedException,
-      IOException {
+      IOException, YarnRemoteException {
 
     FileContext localFS = FileContext.getLocalFSFileContext();
 

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerReboot.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.Container;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.container.ContainerState;
@@ -87,7 +88,8 @@ public class TestNodeManagerReboot {
   }
 
   @Test(timeout = 20000)
-  public void testClearLocalDirWhenNodeReboot() throws IOException {
+  public void testClearLocalDirWhenNodeReboot() throws IOException,
+      YarnRemoteException {
     nm = new MyNodeManager();
     nm.start();
     // create files under fileCache

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerResync.java

@@ -84,7 +84,7 @@ public class TestNodeManagerResync {
   @SuppressWarnings("unchecked")
   @Test
   public void testKillContainersOnResync() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
     NodeManager nm = new TestNodeManager1();
     YarnConfiguration conf = createNMConfig();
     nm.init(conf);
@@ -112,7 +112,7 @@ public class TestNodeManagerResync {
   @SuppressWarnings("unchecked")
   @Test
   public void testBlockNewContainerRequestsOnStartAndResync()
-      throws IOException, InterruptedException {
+      throws IOException, InterruptedException, YarnRemoteException {
     NodeManager nm = new TestNodeManager2();
     YarnConfiguration conf = createNMConfig();
     nm.init(conf);

+ 4 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/TestNodeManagerShutdown.java

@@ -54,6 +54,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.Dispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.nodemanager.containermanager.ContainerManagerImpl;
@@ -97,7 +98,8 @@ public class TestNodeManagerShutdown {
   }
   
   @Test
-  public void testKillContainersOnShutdown() throws IOException {
+  public void testKillContainersOnShutdown() throws IOException,
+      YarnRemoteException {
     NodeManager nm = getNodeManager();
     nm.init(createNMConfig());
     nm.init(createNMConfig());
     nm.start();
     nm.start();
@@ -144,7 +146,7 @@ public class TestNodeManagerShutdown {
 
   public static void startContainer(NodeManager nm, ContainerId cId,
       FileContext localFS, File scriptFileDir, File processStartFile)
-      throws IOException {
+      throws IOException, YarnRemoteException {
     ContainerManagerImpl containerManager = nm.getContainerManager();
     File scriptFile =
         createUnhaltingScriptFile(cId, scriptFileDir, processStartFile);

+ 10 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/TestContainerManager.java

@@ -111,7 +111,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
   }
 
   @Test
-  public void testContainerSetup() throws IOException, InterruptedException {
+  public void testContainerSetup() throws IOException, InterruptedException,
+      YarnRemoteException {
 
     containerManager.start();
 
@@ -202,7 +203,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
 
   @Test
   public void testContainerLaunchAndStop() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
     containerManager.start();
 
     File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
@@ -309,7 +310,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
       DefaultContainerExecutor.containerIsAlive(pid));
   }
 
-  private void testContainerLaunchAndExit(int exitCode) throws IOException, InterruptedException {
+  private void testContainerLaunchAndExit(int exitCode) throws IOException,
+      InterruptedException, YarnRemoteException {
 
 	  File scriptFile = Shell.appendScriptExtension(tmpDir, "scriptFile");
 	  PrintWriter fileWriter = new PrintWriter(scriptFile);
@@ -389,7 +391,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
   }
 
   @Test
-  public void testContainerLaunchAndExitSuccess() throws IOException, InterruptedException {
+  public void testContainerLaunchAndExitSuccess() throws IOException,
+      InterruptedException, YarnRemoteException {
 	  containerManager.start();
 	  int exitCode = 0; 
 
@@ -399,7 +402,8 @@ public class TestContainerManager extends BaseContainerManagerTest {
   }
 
   @Test
-  public void testContainerLaunchAndExitFailure() throws IOException, InterruptedException {
+  public void testContainerLaunchAndExitFailure() throws IOException,
+      InterruptedException, YarnRemoteException {
 	  containerManager.start();
 	  int exitCode = 50; 
 
@@ -410,7 +414,7 @@ public class TestContainerManager extends BaseContainerManagerTest {
 
   @Test
   public void testLocalFilesCleanup() throws InterruptedException,
-      IOException {
+      IOException, YarnRemoteException {
     // Real del service
     delSrvc = new DeletionService(exec);
     delSrvc.init(conf);

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java

@@ -68,6 +68,7 @@ import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.DrainDispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.logaggregation.AggregatedLogFormat;
@@ -663,7 +664,7 @@ public class TestLogAggregationService extends BaseContainerManagerTest {
 
   @Test
   public void testLogAggregationForRealContainerLaunch() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
 
     this.containerManager.start();
 

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/monitor/TestContainersMonitor.java

@@ -55,6 +55,7 @@ import org.apache.hadoop.yarn.api.records.LocalResourceVisibility;
 import org.apache.hadoop.yarn.api.records.URL;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.ExitCode;
 import org.apache.hadoop.yarn.server.nodemanager.ContainerExecutor.Signal;
@@ -177,7 +178,7 @@ public class TestContainersMonitor extends BaseContainerManagerTest {
 
   @Test
   public void testContainerKillOnMemoryOverflow() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
 
     if (!ProcfsBasedProcessTree.isAvailable()) {
       return;

+ 5 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/amlauncher/AMLauncher.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.NodeId;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.EventHandler;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.ipc.YarnRPC;
@@ -98,7 +99,7 @@ public class AMLauncher implements Runnable {
     containerMgrProxy = getContainerMgrProxy(masterContainerID);
   }
 
-  private void launch() throws IOException {
+  private void launch() throws IOException, YarnRemoteException {
     connect();
     ContainerId masterContainerID = masterContainer.getId();
     ApplicationSubmissionContext applicationContext =
@@ -116,7 +117,7 @@ public class AMLauncher implements Runnable {
         + " for AM " + application.getAppAttemptId());
   }
 
-  private void cleanup() throws IOException {
+  private void cleanup() throws IOException, YarnRemoteException {
     connect();
     ContainerId containerId = masterContainer.getId();
     StopContainerRequest stopRequest = 
@@ -256,6 +257,8 @@ public class AMLauncher implements Runnable {
         cleanup();
       } catch(IOException ie) {
         LOG.info("Error cleaning master ", ie);
+      } catch (YarnRemoteException e) {
+        LOG.info("Error cleaning master ", e);
       }
       break;
     default:

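Since YarnRemoteException is no longer rooted at IOException, a lone catch (IOException) block no longer covers remote failures; callers such as the AMLauncher cleanup path above now need two handlers. A minimal sketch of the resulting pattern, condensed from the hunk above (the surrounding event-handling switch is elided):

    try {
      // cleanup() now declares both IOException and YarnRemoteException
      cleanup();
    } catch (IOException ie) {
      LOG.info("Error cleaning master ", ie);
    } catch (YarnRemoteException e) {
      // previously handled by the IOException catch; now a separate checked exception
      LOG.info("Error cleaning master ", e);
    }
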
+ 7 - 5
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/Application.java

@@ -46,6 +46,7 @@ import org.apache.hadoop.yarn.api.records.ContainerLaunchContext;
 import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.Task.State;
@@ -127,7 +128,7 @@ public class Application {
     return used;
   }
 
-  public synchronized void submit() throws IOException {
+  public synchronized void submit() throws IOException, YarnRemoteException {
     ApplicationSubmissionContext context = recordFactory.newRecordInstance(ApplicationSubmissionContext.class);
     context.setApplicationId(this.applicationId);
     context.getAMContainerSpec().setUser(this.user);
@@ -201,7 +202,8 @@ public class Application {
     addResourceRequest(priority, requests, ResourceRequest.ANY, capability);
   }
 
-  public synchronized void finishTask(Task task) throws IOException {
+  public synchronized void finishTask(Task task) throws IOException,
+      YarnRemoteException {
     Set<Task> tasks = this.tasks.get(task.getPriority());
     if (!tasks.remove(task)) {
       throw new IllegalStateException(
@@ -288,7 +290,7 @@ public class Application {
   }
 
   public synchronized void assign(List<Container> containers) 
-  throws IOException {
+  throws IOException, YarnRemoteException {
 
     int numContainers = containers.size();
     // Schedule in priority order
@@ -307,12 +309,12 @@ public class Application {
         assignedContainers + "/" + numContainers);
   }
 
-  public synchronized void schedule() throws IOException {
+  public synchronized void schedule() throws IOException, YarnRemoteException {
     assign(getResources());
   }
 
   private synchronized void assign(Priority priority, NodeType type, 
-      List<Container> containers) throws IOException {
+      List<Container> containers) throws IOException, YarnRemoteException {
     for (Iterator<Container> i=containers.iterator(); i.hasNext();) {
       Container container = i.next();
       String host = container.getNodeId().toString();

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/NodeManager.java

@@ -81,7 +81,7 @@ public class NodeManager implements ContainerManager {
   public NodeManager(String hostName, int containerManagerPort, int httpPort,
       String rackName, Resource capability,
       ResourceTrackerService resourceTrackerService, RMContext rmContext)
-      throws IOException {
+      throws IOException, YarnRemoteException {
     this.containerManagerAddress = hostName + ":" + containerManagerPort;
     this.nodeHttpAddress = hostName + ":" + httpPort;
     this.rackName = rackName;
@@ -144,7 +144,7 @@ public class NodeManager implements ContainerManager {
     }
     return containerStatuses;
   }
-  public void heartbeat() throws IOException {
+  public void heartbeat() throws IOException, YarnRemoteException {
     NodeStatus nodeStatus = 
       org.apache.hadoop.yarn.server.resourcemanager.NodeManager.createNodeStatus(
           nodeId, getContainerStatuses(containers));

+ 14 - 9
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMService.java

@@ -205,15 +205,19 @@ public class TestClientRMService {
       owner.doAs(new PrivilegedExceptionAction<Void>() {
         @Override
         public Void run() throws Exception {
-          checkTokenRenewal(owner, other);
-          return null;
+          try {
+            checkTokenRenewal(owner, other);
+            return null;
+          } catch (YarnRemoteException ex) {
+            Assert.assertEquals(ex.getMessage(),
+                "Client " + owner.getUserName() +
+                " tries to renew a token with renewer specified as " +
+                other.getUserName());
+            throw ex;
+          }
         }
       });
-    } catch (YarnRemoteException e) {
-      Assert.assertEquals(e.getMessage(),
-          "Client " + owner.getUserName() +
-          " tries to renew a token with renewer specified as " +
-          other.getUserName());
+    } catch (Exception e) {
       return;
     }
     Assert.fail("renew should have failed");
@@ -232,7 +236,7 @@ public class TestClientRMService {
   }
 
   private void checkTokenRenewal(UserGroupInformation owner,
-      UserGroupInformation renewer) throws IOException {
+      UserGroupInformation renewer) throws IOException, YarnRemoteException {
     RMDelegationTokenIdentifier tokenIdentifier =
         new RMDelegationTokenIdentifier(
             new Text(owner.getUserName()), new Text(renewer.getUserName()), null);
@@ -312,7 +316,8 @@ public class TestClientRMService {
 
   @Test(timeout=4000)
   public void testConcurrentAppSubmit()
-      throws IOException, InterruptedException, BrokenBarrierException {
+      throws IOException, InterruptedException, BrokenBarrierException,
+      YarnRemoteException {
     YarnScheduler yarnScheduler = mockYarnScheduler();
     RMContext rmContext = mock(RMContext.class);
     mockRMContext(yarnScheduler, rmContext);

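The restructuring above follows from how UserGroupInformation.doAs surfaces checked exceptions: IOException, InterruptedException, RuntimeException and Error are rethrown directly, while anything else, now including YarnRemoteException, comes back wrapped in an UndeclaredThrowableException. The renewal assertion therefore has to run inside the PrivilegedExceptionAction, roughly as in this condensed sketch of the hunk above (the message check is simplified here):

    owner.doAs(new PrivilegedExceptionAction<Void>() {
      @Override
      public Void run() throws Exception {
        try {
          checkTokenRenewal(owner, other);
          return null;
        } catch (YarnRemoteException ex) {
          // assert while the exception is still unwrapped
          Assert.assertTrue(ex.getMessage().contains(other.getUserName()));
          throw ex;
        }
      }
    });
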
+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestClientRMTokens.java

@@ -72,7 +72,8 @@ public class TestClientRMTokens {
   }
 
   @Test
-  public void testDelegationToken() throws IOException, InterruptedException {
+  public void testDelegationToken() throws IOException, InterruptedException,
+      YarnRemoteException {
 
     final YarnConfiguration conf = new YarnConfiguration();
     conf.set(YarnConfiguration.RM_PRINCIPAL, "testuser/localhost@apache.org");

+ 5 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/TestResourceManager.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.yarn.api.records.Priority;
 import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.resourcemanager.resource.Resources;
 import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
 import org.junit.After;
@@ -59,7 +60,8 @@ public class TestResourceManager {
 
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
       registerNode(String hostName, int containerManagerPort, int httpPort,
-          String rackName, Resource capability) throws IOException {
+          String rackName, Resource capability) throws IOException,
+          YarnRemoteException {
     return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
         hostName, containerManagerPort, httpPort, rackName, capability,
         resourceManager.getResourceTrackerService(), resourceManager
@@ -67,7 +69,8 @@ public class TestResourceManager {
   }
 
 //  @Test
-  public void testResourceAllocation() throws IOException {
+  public void testResourceAllocation() throws IOException,
+      YarnRemoteException {
     LOG.info("--- START: testResourceAllocation ---");
 
     final int memory = 4 * 1024;

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/resourcetracker/TestRMNMRPCResponseId.java

@@ -28,6 +28,7 @@ import org.apache.hadoop.yarn.event.Dispatcher;
 import org.apache.hadoop.yarn.event.Event;
 import org.apache.hadoop.yarn.event.EventHandler;
 import org.apache.hadoop.yarn.event.InlineDispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.api.protocolrecords.NodeHeartbeatRequest;
@@ -88,7 +89,7 @@ public class TestRMNMRPCResponseId {
   }
 
   @Test
-  public void testRPCResponseId() throws IOException {
+  public void testRPCResponseId() throws IOException, YarnRemoteException {
     String node = "localhost";
     Resource capability = BuilderUtils.newResource(1024, 1);
     RegisterNodeManagerRequest request = recordFactory.newRecordInstance(RegisterNodeManagerRequest.class);

+ 2 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/capacity/TestCapacityScheduler.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.yarn.api.records.Resource;
 import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.server.resourcemanager.Application;
 import org.apache.hadoop.yarn.server.resourcemanager.MockNodes;
 import org.apache.hadoop.yarn.server.resourcemanager.RMContextImpl;
@@ -101,7 +102,7 @@ public class TestCapacityScheduler {
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
       registerNode(String hostName, int containerManagerPort, int httpPort,
           String rackName, Resource capability)
-          throws IOException {
+          throws IOException, YarnRemoteException {
     return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
         hostName, containerManagerPort, httpPort, rackName, capability,
         resourceManager.getResourceTrackerService(), resourceManager

+ 3 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fifo/TestFifoScheduler.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.yarn.api.records.ResourceRequest;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
 import org.apache.hadoop.yarn.event.AsyncDispatcher;
 import org.apache.hadoop.yarn.event.InlineDispatcher;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.factories.RecordFactory;
 import org.apache.hadoop.yarn.factory.providers.RecordFactoryProvider;
 import org.apache.hadoop.yarn.server.resourcemanager.Application;
@@ -84,7 +85,8 @@ public class TestFifoScheduler {
 
   private org.apache.hadoop.yarn.server.resourcemanager.NodeManager
       registerNode(String hostName, int containerManagerPort, int nmHttpPort,
-          String rackName, Resource capability) throws IOException {
+          String rackName, Resource capability) throws IOException,
+          YarnRemoteException {
     return new org.apache.hadoop.yarn.server.resourcemanager.NodeManager(
         hostName, containerManagerPort, nmHttpPort, rackName, capability,
         resourceManager.getResourceTrackerService(), resourceManager

+ 16 - 13
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/security/TestClientTokens.java

@@ -78,7 +78,7 @@ public class TestClientTokens {
   private interface CustomProtocol {
     public static final long versionID = 1L;
 
-    public void ping();
+    public void ping() throws YarnRemoteException;
   }
 
   private static class CustomSecurityInfo extends SecurityInfo {
@@ -121,7 +121,7 @@ public class TestClientTokens {
     }
 
     @Override
-    public void ping() {
+    public void ping() throws YarnRemoteException {
       this.pinged = true;
     }
 
@@ -270,21 +270,24 @@ public class TestClientTokens {
     ugi.addToken(maliciousToken);
 
     try {
-      ugi.doAs(new PrivilegedExceptionAction<Void>() {
+      ugi.doAs(new PrivilegedExceptionAction<Void>()  {
         @Override
         public Void run() throws Exception {
-          CustomProtocol client =
-              (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
-                am.address, conf);
-          client.ping();
-          fail("Connection initiation with illegally modified "
-              + "tokens is expected to fail.");
-          return null;
+          try {
+            CustomProtocol client =
+                (CustomProtocol) RPC.getProxy(CustomProtocol.class, 1L,
+                  am.address, conf);
+            client.ping();
+            fail("Connection initiation with illegally modified "
+                + "tokens is expected to fail.");
+            return null;
+          } catch (YarnRemoteException ex) {
+            fail("Cannot get a YARN remote exception as "
+                + "it will indicate RPC success");
+            throw ex;
+          }
         }
       });
-    } catch (YarnRemoteException e) {
-      fail("Cannot get a YARN remote exception as "
-          + "it will indicate RPC success");
     } catch (Exception e) {
       Assert
         .assertEquals(java.lang.reflect.UndeclaredThrowableException.class

+ 2 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java

@@ -362,7 +362,7 @@ public class MiniYARNCluster extends CompositeService {
                   NodeHeartbeatResponse.class);
               try {
                 response = rt.nodeHeartbeat(request);
-              } catch (IOException ioe) {
+              } catch (YarnRemoteException ioe) {
                 LOG.info("Exception in heartbeat from node " + 
                     request.getNodeStatus().getNodeId(), ioe);
                 throw RPCUtil.getRemoteException(ioe);
@@ -378,7 +378,7 @@ public class MiniYARNCluster extends CompositeService {
                   newRecordInstance(RegisterNodeManagerResponse.class);
               try {
                 response = rt.registerNodeManager(request);
-              } catch (IOException ioe) {
+              } catch (YarnRemoteException ioe) {
                 LOG.info("Exception in node registration from "
                     + request.getNodeId().toString(), ioe);
                 throw RPCUtil.getRemoteException(ioe);

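MiniYARNCluster's embedded ResourceTracker keeps its own contract by catching the now-separate checked exception and rethrowing it through RPCUtil; a condensed view of the two hunks above (record construction and the registration variant elided):

    try {
      response = rt.nodeHeartbeat(request);
    } catch (YarnRemoteException yre) {
      LOG.info("Exception in heartbeat from node "
          + request.getNodeStatus().getNodeId(), yre);
      // rethrow via RPCUtil so callers still see a YarnRemoteException
      throw RPCUtil.getRemoteException(yre);
    }
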
+ 5 - 3
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java

@@ -128,7 +128,7 @@ public class TestContainerManagerSecurity {
 
   @Test
   public void testAuthenticatedUser() throws IOException,
-      InterruptedException {
+      InterruptedException, YarnRemoteException {
 
     LOG.info("Running test for authenticated user");
 
@@ -180,7 +180,8 @@ public class TestContainerManagerSecurity {
   }
 
   @Test
-  public void testMaliceUser() throws IOException, InterruptedException {
+  public void testMaliceUser() throws IOException, InterruptedException,
+      YarnRemoteException {
 
     LOG.info("Running test for malice user");
 
@@ -266,7 +267,8 @@ public class TestContainerManagerSecurity {
   }
 
   @Test
-  public void testUnauthorizedUser() throws IOException, InterruptedException {
+  public void testUnauthorizedUser() throws IOException, InterruptedException,
+      YarnRemoteException {
 
     LOG.info("\n\nRunning test for malice user");
 

+ 5 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/main/java/org/apache/hadoop/yarn/server/webproxy/WebAppProxyServlet.java

@@ -50,6 +50,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.yarn.api.records.ApplicationId;
 import org.apache.hadoop.yarn.api.records.ApplicationReport;
 import org.apache.hadoop.yarn.conf.YarnConfiguration;
+import org.apache.hadoop.yarn.exceptions.YarnRemoteException;
 import org.apache.hadoop.yarn.util.Apps;
 import org.apache.hadoop.yarn.util.StringHelper;
 import org.apache.hadoop.yarn.util.TrackingUriPlugin;
@@ -215,7 +216,8 @@ public class WebAppProxyServlet extends HttpServlet {
     return false;
   }
 
-  private ApplicationReport getApplicationReport(ApplicationId id) throws IOException {
+  private ApplicationReport getApplicationReport(ApplicationId id)
+      throws IOException, YarnRemoteException {
     return ((AppReportFetcher) getServletContext()
         .getAttribute(WebAppProxy.FETCHER_ATTRIBUTE)).getApplicationReport(id);
   }
@@ -333,6 +335,8 @@ public class WebAppProxyServlet extends HttpServlet {
 
     } catch(URISyntaxException e) {
       throw new IOException(e); 
+    } catch (YarnRemoteException e) {
+      throw new IOException(e);
     }
   }
 }
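
At the servlet boundary the new checked exception is simply wrapped back into an IOException, mirroring the existing URISyntaxException handling; a condensed view of the catch chain above:

    } catch (URISyntaxException e) {
      throw new IOException(e);
    } catch (YarnRemoteException e) {
      // no longer an IOException itself, so wrap it before it leaves the servlet
      throw new IOException(e);
    }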