|
@@ -22,8 +22,11 @@ import static org.junit.Assert.assertTrue;
|
|
|
|
|
|
import java.io.IOException;
|
|
|
import java.security.PrivilegedExceptionAction;
|
|
|
+import java.util.Arrays;
|
|
|
import java.util.HashMap;
|
|
|
+import java.util.List;
|
|
|
import java.util.Map;
|
|
|
+import java.util.Random;
|
|
|
import java.util.concurrent.ExecutionException;
|
|
|
import java.util.concurrent.Future;
|
|
|
|
|
@@ -31,18 +34,30 @@ import org.apache.commons.logging.Log;
|
|
|
import org.apache.commons.logging.LogFactory;
|
|
|
import org.apache.hadoop.conf.Configuration;
|
|
|
import org.apache.hadoop.fs.CommonConfigurationKeys;
|
|
|
+import org.apache.hadoop.fs.FileStatus;
|
|
|
import org.apache.hadoop.fs.FileSystem;
|
|
|
import org.apache.hadoop.fs.Path;
|
|
|
import org.apache.hadoop.fs.Options.Rename;
|
|
|
+import org.apache.hadoop.fs.permission.FsAction;
|
|
|
+import org.apache.hadoop.fs.permission.FsPermission;
|
|
|
+import org.apache.hadoop.hdfs.TestDFSPermission.PermissionGenerator;
|
|
|
import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
|
|
|
import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
|
|
|
import org.apache.hadoop.hdfs.server.namenode.NameNodeAdapter;
|
|
|
import org.apache.hadoop.ipc.AsyncCallLimitExceededException;
|
|
|
+import org.apache.hadoop.security.AccessControlException;
|
|
|
import org.apache.hadoop.security.UserGroupInformation;
|
|
|
+import org.apache.hadoop.util.Time;
|
|
|
import org.junit.Test;
|
|
|
|
|
|
public class TestAsyncDFSRename {
|
|
|
public static final Log LOG = LogFactory.getLog(TestAsyncDFSRename.class);
|
|
|
+ private final long seed = Time.now();
|
|
|
+ private final Random r = new Random(seed);
|
|
|
+ private final PermissionGenerator permGenerator = new PermissionGenerator(r);
|
|
|
+ private final short replFactor = 2;
|
|
|
+ private final long blockSize = 512;
|
|
|
+ private long fileLen = blockSize * 3;
|
|
|
|
|
|
/**
|
|
|
* Check the blocks of dst file are cleaned after rename with overwrite
|
|
@@ -50,8 +65,6 @@ public class TestAsyncDFSRename {
|
|
|
*/
|
|
|
@Test(timeout = 60000)
|
|
|
public void testAsyncRenameWithOverwrite() throws Exception {
|
|
|
- final short replFactor = 2;
|
|
|
- final long blockSize = 512;
|
|
|
Configuration conf = new Configuration();
|
|
|
MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(
|
|
|
replFactor).build();
|
|
@@ -60,8 +73,6 @@ public class TestAsyncDFSRename {
|
|
|
AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();
|
|
|
|
|
|
try {
|
|
|
-
|
|
|
- long fileLen = blockSize * 3;
|
|
|
String src = "/foo/src";
|
|
|
String dst = "/foo/dst";
|
|
|
String src2 = "/foo/src2";
|
|
@@ -115,8 +126,6 @@ public class TestAsyncDFSRename {
|
|
|
|
|
|
@Test(timeout = 60000)
|
|
|
public void testCallGetReturnValueMultipleTimes() throws Exception {
|
|
|
- final short replFactor = 2;
|
|
|
- final long blockSize = 512;
|
|
|
final Path renameDir = new Path(
|
|
|
"/test/testCallGetReturnValueMultipleTimes/");
|
|
|
final Configuration conf = new HdfsConfiguration();
|
|
@@ -127,7 +136,6 @@ public class TestAsyncDFSRename {
|
|
|
final DistributedFileSystem dfs = cluster.getFileSystem();
|
|
|
final AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();
|
|
|
final int count = 100;
|
|
|
- long fileLen = blockSize * 3;
|
|
|
final Map<Integer, Future<Void>> returnFutures = new HashMap<Integer, Future<Void>>();
|
|
|
|
|
|
assertTrue(dfs.mkdirs(renameDir));
|
|
@@ -178,15 +186,15 @@ public class TestAsyncDFSRename {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
- @Test(timeout = 120000)
|
|
|
- public void testAggressiveConcurrentAsyncRenameWithOverwrite()
|
|
|
+ @Test
|
|
|
+ public void testConservativeConcurrentAsyncRenameWithOverwrite()
|
|
|
throws Exception {
|
|
|
internalTestConcurrentAsyncRenameWithOverwrite(100,
|
|
|
"testAggressiveConcurrentAsyncRenameWithOverwrite");
|
|
|
}
|
|
|
|
|
|
@Test(timeout = 60000)
|
|
|
- public void testConservativeConcurrentAsyncRenameWithOverwrite()
|
|
|
+ public void testAggressiveConcurrentAsyncRenameWithOverwrite()
|
|
|
throws Exception {
|
|
|
internalTestConcurrentAsyncRenameWithOverwrite(10000,
|
|
|
"testConservativeConcurrentAsyncRenameWithOverwrite");
|
|
@@ -194,8 +202,6 @@ public class TestAsyncDFSRename {
|
|
|
|
|
|
private void internalTestConcurrentAsyncRenameWithOverwrite(
|
|
|
final int asyncCallLimit, final String basePath) throws Exception {
|
|
|
- final short replFactor = 2;
|
|
|
- final long blockSize = 512;
|
|
|
final Path renameDir = new Path(String.format("/test/%s/", basePath));
|
|
|
Configuration conf = new HdfsConfiguration();
|
|
|
conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
|
|
@@ -206,7 +212,6 @@ public class TestAsyncDFSRename {
|
|
|
DistributedFileSystem dfs = cluster.getFileSystem();
|
|
|
AsyncDistributedFileSystem adfs = dfs.getAsyncDistributedFileSystem();
|
|
|
int count = 1000;
|
|
|
- long fileLen = blockSize * 3;
|
|
|
int start = 0, end = 0;
|
|
|
Map<Integer, Future<Void>> returnFutures = new HashMap<Integer, Future<Void>>();
|
|
|
|
|
@@ -274,8 +279,206 @@ public class TestAsyncDFSRename {
|
|
|
}
|
|
|
}
|
|
|
|
|
|
+ @Test
|
|
|
+ public void testConservativeConcurrentAsyncAPI() throws Exception {
|
|
|
+ internalTestConcurrentAsyncAPI(100, "testConservativeConcurrentAsyncAPI");
|
|
|
+ }
|
|
|
+
|
|
|
+ @Test(timeout = 60000)
|
|
|
+ public void testAggressiveConcurrentAsyncAPI() throws Exception {
|
|
|
+ internalTestConcurrentAsyncAPI(10000, "testAggressiveConcurrentAsyncAPI");
|
|
|
+ }
|
|
|
+
|
|
|
+ private void internalTestConcurrentAsyncAPI(final int asyncCallLimit,
|
|
|
+ final String basePath) throws Exception {
|
|
|
+ Configuration conf = new HdfsConfiguration();
|
|
|
+ String group1 = "group1";
|
|
|
+ String group2 = "group2";
|
|
|
+ String user1 = "user1";
|
|
|
+ int count = 500;
|
|
|
+
|
|
|
+ // explicitly turn on permission checking
|
|
|
+ conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
|
|
|
+ // set the limit of max async calls
|
|
|
+ conf.setInt(CommonConfigurationKeys.IPC_CLIENT_ASYNC_CALLS_MAX_KEY,
|
|
|
+ asyncCallLimit);
|
|
|
+
|
|
|
+ // create fake mapping for the groups
|
|
|
+ Map<String, String[]> u2gMap = new HashMap<String, String[]>(1);
|
|
|
+ u2gMap.put(user1, new String[] {group1, group2});
|
|
|
+ DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2gMap);
|
|
|
+
|
|
|
+ // start mini cluster
|
|
|
+ final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
|
|
|
+ .numDataNodes(3).build();
|
|
|
+ cluster.waitActive();
|
|
|
+ AsyncDistributedFileSystem adfs = cluster.getFileSystem()
|
|
|
+ .getAsyncDistributedFileSystem();
|
|
|
+
|
|
|
+ // prepare for test
|
|
|
+ FileSystem rootFs = FileSystem.get(conf);
|
|
|
+ final Path parent = new Path(String.format("/test/%s/", basePath));
|
|
|
+ final Path[] srcs = new Path[count];
|
|
|
+ final Path[] dsts = new Path[count];
|
|
|
+ short[] permissions = new short[count];
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ srcs[i] = new Path(parent, "src" + i);
|
|
|
+ dsts[i] = new Path(parent, "dst" + i);
|
|
|
+ DFSTestUtil.createFile(rootFs, srcs[i], fileLen, replFactor, 1);
|
|
|
+ DFSTestUtil.createFile(rootFs, dsts[i], fileLen, replFactor, 1);
|
|
|
+ assertTrue(rootFs.exists(srcs[i]));
|
|
|
+ assertTrue(rootFs.getFileStatus(srcs[i]).isFile());
|
|
|
+ assertTrue(rootFs.exists(dsts[i]));
|
|
|
+ assertTrue(rootFs.getFileStatus(dsts[i]).isFile());
|
|
|
+ permissions[i] = permGenerator.next();
|
|
|
+ }
|
|
|
+
|
|
|
+ Map<Integer, Future<Void>> renameRetFutures =
|
|
|
+ new HashMap<Integer, Future<Void>>();
|
|
|
+ Map<Integer, Future<Void>> permRetFutures =
|
|
|
+ new HashMap<Integer, Future<Void>>();
|
|
|
+ Map<Integer, Future<Void>> ownerRetFutures =
|
|
|
+ new HashMap<Integer, Future<Void>>();
|
|
|
+ int start = 0, end = 0;
|
|
|
+ // test rename
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ for (;;) {
|
|
|
+ try {
|
|
|
+ Future<Void> returnFuture = adfs.rename(srcs[i], dsts[i],
|
|
|
+ Rename.OVERWRITE);
|
|
|
+ renameRetFutures.put(i, returnFuture);
|
|
|
+ break;
|
|
|
+ } catch (AsyncCallLimitExceededException e) {
|
|
|
+ start = end;
|
|
|
+ end = i;
|
|
|
+ waitForReturnValues(renameRetFutures, start, end);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ // wait for completing the calls
|
|
|
+ for (int i = start; i < count; i++) {
|
|
|
+ renameRetFutures.get(i).get();
|
|
|
+ }
|
|
|
+
|
|
|
+ // Restart NN and check the renames succeeded
|
|
|
+ cluster.restartNameNodes();
|
|
|
+
|
|
|
+ // verify the src should not exist, dst should
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ assertFalse(rootFs.exists(srcs[i]));
|
|
|
+ assertTrue(rootFs.exists(dsts[i]));
|
|
|
+ }
|
|
|
+
|
|
|
+ // test permissions
|
|
|
+ try {
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ for (;;) {
|
|
|
+ try {
|
|
|
+ Future<Void> retFuture = adfs.setPermission(dsts[i],
|
|
|
+ new FsPermission(permissions[i]));
|
|
|
+ permRetFutures.put(i, retFuture);
|
|
|
+ break;
|
|
|
+ } catch (AsyncCallLimitExceededException e) {
|
|
|
+ start = end;
|
|
|
+ end = i;
|
|
|
+ waitForReturnValues(permRetFutures, start, end);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // wait for completing the calls
|
|
|
+ for (int i = start; i < count; i++) {
|
|
|
+ permRetFutures.get(i).get();
|
|
|
+ }
|
|
|
+
|
|
|
+ // Restart NN and check permission then
|
|
|
+ cluster.restartNameNodes();
|
|
|
+
|
|
|
+ // verify the permission
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ assertTrue(rootFs.exists(dsts[i]));
|
|
|
+ FsPermission fsPerm = new FsPermission(permissions[i]);
|
|
|
+ checkAccessPermissions(rootFs.getFileStatus(dsts[i]),
|
|
|
+ fsPerm.getUserAction());
|
|
|
+ }
|
|
|
+
|
|
|
+ // test setOwner
|
|
|
+ start = 0;
|
|
|
+ end = 0;
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ for (;;) {
|
|
|
+ try {
|
|
|
+ Future<Void> retFuture = adfs.setOwner(dsts[i], "user1",
|
|
|
+ "group2");
|
|
|
+ ownerRetFutures.put(i, retFuture);
|
|
|
+ break;
|
|
|
+ } catch (AsyncCallLimitExceededException e) {
|
|
|
+ start = end;
|
|
|
+ end = i;
|
|
|
+ waitForReturnValues(ownerRetFutures, start, end);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+ // wait for completing the calls
|
|
|
+ for (int i = start; i < count; i++) {
|
|
|
+ ownerRetFutures.get(i).get();
|
|
|
+ }
|
|
|
+
|
|
|
+ // Restart NN and check owner then
|
|
|
+ cluster.restartNameNodes();
|
|
|
+
|
|
|
+ // verify the owner
|
|
|
+ for (int i = 0; i < count; i++) {
|
|
|
+ assertTrue(rootFs.exists(dsts[i]));
|
|
|
+ assertTrue(
|
|
|
+ "user1".equals(rootFs.getFileStatus(dsts[i]).getOwner()));
|
|
|
+ assertTrue(
|
|
|
+ "group2".equals(rootFs.getFileStatus(dsts[i]).getGroup()));
|
|
|
+ }
|
|
|
+ } catch (AccessControlException ace) {
|
|
|
+ throw ace;
|
|
|
+ } finally {
|
|
|
+ if (rootFs != null) {
|
|
|
+ rootFs.close();
|
|
|
+ }
|
|
|
+ if (cluster != null) {
|
|
|
+ cluster.shutdown();
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ static void checkAccessPermissions(FileStatus stat, FsAction mode)
|
|
|
+ throws IOException {
|
|
|
+ checkAccessPermissions(UserGroupInformation.getCurrentUser(), stat, mode);
|
|
|
+ }
|
|
|
+
|
|
|
+ static void checkAccessPermissions(final UserGroupInformation ugi,
|
|
|
+ FileStatus stat, FsAction mode) throws IOException {
|
|
|
+ FsPermission perm = stat.getPermission();
|
|
|
+ String user = ugi.getShortUserName();
|
|
|
+ List<String> groups = Arrays.asList(ugi.getGroupNames());
|
|
|
+
|
|
|
+ if (user.equals(stat.getOwner())) {
|
|
|
+ if (perm.getUserAction().implies(mode)) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ } else if (groups.contains(stat.getGroup())) {
|
|
|
+ if (perm.getGroupAction().implies(mode)) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ if (perm.getOtherAction().implies(mode)) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ throw new AccessControlException(String.format(
|
|
|
+ "Permission denied: user=%s, path=\"%s\":%s:%s:%s%s", user, stat
|
|
|
+ .getPath(), stat.getOwner(), stat.getGroup(),
|
|
|
+ stat.isDirectory() ? "d" : "-", perm));
|
|
|
+ }
|
|
|
+
|
|
|
@Test(timeout = 60000)
|
|
|
- public void testAsyncRenameWithException() throws Exception {
|
|
|
+ public void testAsyncAPIWithException() throws Exception {
|
|
|
Configuration conf = new HdfsConfiguration();
|
|
|
String group1 = "group1";
|
|
|
String group2 = "group2";
|
|
@@ -286,9 +489,9 @@ public class TestAsyncDFSRename {
|
|
|
conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
|
|
|
|
|
|
// create fake mapping for the groups
|
|
|
- Map<String, String[]> u2g_map = new HashMap<String, String[]>(1);
|
|
|
- u2g_map.put(user1, new String[] { group1, group2 });
|
|
|
- DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2g_map);
|
|
|
+ Map<String, String[]> u2gMap = new HashMap<String, String[]>(1);
|
|
|
+ u2gMap.put(user1, new String[] {group1, group2});
|
|
|
+ DFSTestUtil.updateConfWithFakeGroupMapping(conf, u2gMap);
|
|
|
|
|
|
// Initiate all four users
|
|
|
ugi1 = UserGroupInformation.createUserForTesting(user1, new String[] {
|
|
@@ -299,7 +502,7 @@ public class TestAsyncDFSRename {
|
|
|
cluster.waitActive();
|
|
|
|
|
|
FileSystem rootFs = FileSystem.get(conf);
|
|
|
- final Path renameDir = new Path("/test/async_rename_exception/");
|
|
|
+ final Path renameDir = new Path("/test/async_api_exception/");
|
|
|
final Path src = new Path(renameDir, "src");
|
|
|
final Path dst = new Path(renameDir, "dst");
|
|
|
rootFs.mkdirs(src);
|
|
@@ -312,11 +515,33 @@ public class TestAsyncDFSRename {
|
|
|
}
|
|
|
});
|
|
|
|
|
|
+ Future<Void> retFuture;
|
|
|
+ try {
|
|
|
+ retFuture = adfs.rename(src, dst, Rename.OVERWRITE);
|
|
|
+ retFuture.get();
|
|
|
+ } catch (ExecutionException e) {
|
|
|
+ checkPermissionDenied(e, src, user1);
|
|
|
+ assertTrue("Permission denied messages must carry the path parent", e
|
|
|
+ .getMessage().contains(src.getParent().toUri().getPath()));
|
|
|
+ }
|
|
|
+
|
|
|
+ FsPermission fsPerm = new FsPermission(permGenerator.next());
|
|
|
+ try {
|
|
|
+ retFuture = adfs.setPermission(src, fsPerm);
|
|
|
+ retFuture.get();
|
|
|
+ } catch (ExecutionException e) {
|
|
|
+ checkPermissionDenied(e, src, user1);
|
|
|
+ assertTrue("Permission denied messages must carry the name of the path",
|
|
|
+ e.getMessage().contains(src.getName()));
|
|
|
+ }
|
|
|
+
|
|
|
try {
|
|
|
- Future<Void> returnFuture = adfs.rename(src, dst, Rename.OVERWRITE);
|
|
|
- returnFuture.get();
|
|
|
+ retFuture = adfs.setOwner(src, "user1", "group2");
|
|
|
+ retFuture.get();
|
|
|
} catch (ExecutionException e) {
|
|
|
checkPermissionDenied(e, src, user1);
|
|
|
+ assertTrue("Permission denied messages must carry the name of the path",
|
|
|
+ e.getMessage().contains(src.getName()));
|
|
|
} finally {
|
|
|
if (rootFs != null) {
|
|
|
rootFs.close();
|
|
@@ -334,7 +559,5 @@ public class TestAsyncDFSRename {
|
|
|
e.getMessage().contains("AccessControlException"));
|
|
|
assertTrue("Permission denied messages must carry the username", e
|
|
|
.getMessage().contains(user));
|
|
|
- assertTrue("Permission denied messages must carry the path parent", e
|
|
|
- .getMessage().contains(dir.getParent().toUri().getPath()));
|
|
|
}
|
|
|
}
|