@@ -33,7 +33,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
-import org.apache.hadoop.hdfs.protocolR23Compatible.ClientNamenodeWireProtocol;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -61,7 +60,6 @@ public class TestSubmitJob {
       new Path(System.getProperty("test.build.data","/tmp"),
           "job-submission-testing");
 
-
   /**
    * Test to verify that jobs with invalid memory requirements are killed at the
    * JT.
@@ -109,8 +107,9 @@ public class TestSubmitJob {
       runJobAndVerifyFailure(jobConf, 1 * 1024L, 5 * 1024L,
           "Exceeds the cluster's max-memory-limit.");
     } finally {
-      if (mrCluster != null)
+      if (mrCluster != null) {
         mrCluster.shutdown();
+      }
     }
   }
 
@@ -148,16 +147,16 @@ public class TestSubmitJob {
         conf, NetUtils.getSocketFactory(conf, ClientProtocol.class));
   }
 
-  static ClientNamenodeWireProtocol getDFSClient(
+  static org.apache.hadoop.hdfs.protocol.ClientProtocol getDFSClient(
       Configuration conf, UserGroupInformation ugi)
   throws IOException {
-    return (ClientNamenodeWireProtocol)
-      RPC.getProxy(ClientNamenodeWireProtocol.class,
-          ClientNamenodeWireProtocol.versionID,
+    return (org.apache.hadoop.hdfs.protocol.ClientProtocol)
+      RPC.getProxy(org.apache.hadoop.hdfs.protocol.ClientProtocol.class,
+          org.apache.hadoop.hdfs.protocol.ClientProtocol.versionID,
          NameNode.getAddress(conf), ugi,
          conf,
          NetUtils.getSocketFactory(conf,
-              ClientNamenodeWireProtocol.class));
+              org.apache.hadoop.hdfs.protocol.ClientProtocol.class));
   }
 
   /**
@@ -226,7 +225,7 @@ public class TestSubmitJob {
     UserGroupInformation user2 =
         TestMiniMRWithDFSWithDistinctUsers.createUGI("user2", false);
     JobConf conf_other = mr.createJobConf();
-    ClientNamenodeWireProtocol client =
+    org.apache.hadoop.hdfs.protocol.ClientProtocol client =
       getDFSClient(conf_other, user2);
 
     // try accessing mapred.system.dir/jobid/*
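
Below is a minimal sketch (not part of the patch) of how the proxy built by the reworked getDFSClient can be exercised. It inlines the same RPC.getProxy call the hunks above use; the helper class name, the probe path, and the expectation that an unauthorized user is rejected with a RemoteException are assumptions drawn from the surrounding test, not guarantees of this diff.

import java.io.IOException;
import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
import org.apache.hadoop.hdfs.server.namenode.NameNode;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.net.NetUtils;
import org.apache.hadoop.security.UserGroupInformation;

// Hypothetical helper; mirrors the patched TestSubmitJob.getDFSClient.
public class ClientProtocolProxySketch {
  static void probe(Configuration conf, UserGroupInformation ugi, String path)
      throws IOException {
    // Build a ClientProtocol proxy bound to the given user, exactly as the
    // patched getDFSClient does.
    InetSocketAddress nnAddr = NameNode.getAddress(conf);
    ClientProtocol client = (ClientProtocol) RPC.getProxy(
        ClientProtocol.class, ClientProtocol.versionID, nnAddr, ugi, conf,
        NetUtils.getSocketFactory(conf, ClientProtocol.class));
    try {
      // getFileInfo goes over the wire to the NameNode; for a user without
      // read permission on the path (e.g. mapred.system.dir/jobid/*) this is
      // expected to fail with a RemoteException.
      HdfsFileStatus status = client.getFileInfo(path);
      System.out.println(status == null
          ? "no such path" : "accessible, length=" + status.getLen());
    } catch (RemoteException re) {
      System.out.println("denied as expected: " + re.getClassName());
    } finally {
      RPC.stopProxy(client);
    }
  }
}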