Browse code

MAPREDUCE-2797. Update mapreduce tests and RAID for HDFS-2239.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1156215 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 14 years ago
parent
commit
2d653c994c

+ 2 - 0
mapreduce/CHANGES.txt

@@ -381,6 +381,8 @@ Trunk (unreleased changes)
     MAPREDUCE-2760. mapreduce.jobtracker.split.metainfo.maxsize typoed
     in mapred-default.xml. (todd via eli)
 
+    MAPREDUCE-2797. Update mapreduce tests and RAID for HDFS-2239.  (szetszwo)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 2 - 2
mapreduce/src/contrib/raid/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRaid.java

@@ -543,7 +543,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
     }
     // remove the prefix
     String src = parity.substring(prefix.length());
-    if (NameNodeRaidUtil.getFileInfo(namesystem.dir, src, true) == null) {
+    if (NameNodeRaidUtil.getFileInfo(namesystem, src, true) == null) {
       return null;
     }
     return src;
@@ -575,7 +575,7 @@ public class BlockPlacementPolicyRaid extends BlockPlacementPolicy {
   private String getParityFile(String parityPrefix, String src)
       throws IOException {
     String parity = parityPrefix + src;
-    if (NameNodeRaidUtil.getFileInfo(namesystem.dir, parity, true) == null) {
+    if (NameNodeRaidUtil.getFileInfo(namesystem, parity, true) == null) {
       return null;
     }
     return parity;

+ 2 - 1
mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestBinaryTokenFile.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -147,7 +148,7 @@ public class TestBinaryTokenFile {
         dfsCluster.getFileSystem().getUri().toString(), 1, null, null, null, 
         jConf);
 
-    dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     
     p1 = new Path("file1");

+ 3 - 2
mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCache.java

@@ -45,6 +45,7 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenSecretManager;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.mapred.JobConf;
@@ -157,7 +158,7 @@ public class TestTokenCache {
     
     createTokenFileJson();
     verifySecretKeysInJSONFile();
-    dfsCluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     
     p1 = new Path("file1");
@@ -303,7 +304,7 @@ public class TestTokenCache {
     HftpFileSystem hfs = mock(HftpFileSystem.class);
 
     DelegationTokenSecretManager dtSecretManager = 
-      dfsCluster.getNamesystem().getDelegationTokenSecretManager();
+        NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem());
     String renewer = "renewer";
     jConf.set(JTConfig.JT_USER_NAME,renewer);
     DelegationTokenIdentifier dtId = 

+ 2 - 2
mapreduce/src/test/mapred/org/apache/hadoop/mapreduce/security/TestTokenCacheOldApi.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.NullWritable;
 import org.apache.hadoop.io.Text;
@@ -185,8 +186,7 @@ public class TestTokenCacheOldApi {
     
     createTokenFileJson();
     verifySecretKeysInJSONFile();
-    dfsCluster.getNamesystem()
-				.getDelegationTokenSecretManager().startThreads();
+    NameNodeAdapter.getDtSecretManager(dfsCluster.getNamesystem()).startThreads();
     FileSystem fs = dfsCluster.getFileSystem();
     
     p1 = new Path("file1");