
HDFS-6210. Support GETACLSTATUS operation in WebImageViewer. Contributed by Akira Ajisaka.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/trunk@1589933 13f79535-47bb-0310-9956-ffa450edef68
Haohui Mai · 11 years ago · commit bce5a92c54
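
With this change, the WebImageViewer answers GETACLSTATUS requests on the same WebHDFS-style REST endpoint it already serves GETFILESTATUS and LISTSTATUS from. A minimal client sketch, assuming a viewer has already been started against an fsimage (for example via the hdfs oiv tool) and is listening on localhost:5978; the address and the /some/dir path are placeholders to adjust:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.HttpURLConnection;
import java.net.URL;

public class GetAclStatusClient {
  public static void main(String[] args) throws Exception {
    // Placeholder endpoint: point this at a running WebImageViewer.
    URL url = new URL(
        "http://localhost:5978/webhdfs/v1/some/dir?op=GETACLSTATUS");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    BufferedReader in = new BufferedReader(
        new InputStreamReader(conn.getInputStream(), "UTF-8"));
    try {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line); // JSON-formatted AclStatus
      }
    } finally {
      in.close();
    }
  }
}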

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -326,6 +326,9 @@ Release 2.5.0 - UNRELEASED
 
     HDFS-6266. Identify full path for a given INode. (jing9)
 
+    HDFS-6210. Support GETACLSTATUS operation in WebImageViewer.
+    (Akira Ajisaka via wheat9)
+
  OPTIMIZATIONS

    HDFS-6214. Webhdfs has poor throughput for files >2GB (daryn)

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageHandler.java

@@ -104,6 +104,8 @@ public class FSImageHandler extends SimpleChannelUpstreamHandler {
        content = loader.getFileStatus(path);
      } else if (op.equals("LISTSTATUS")) {
        content = loader.listStatus(path);
+      } else if (op.equals("GETACLSTATUS")) {
+        content = loader.getAclStatus(path);
      } else {
        response.setStatus(HttpResponseStatus.BAD_REQUEST);
      }
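
Any op value the handler does not recognize still falls through to the else branch above and is answered with 400 Bad Request. A quick sketch to observe that, reusing the placeholder endpoint from the earlier example:

import java.net.HttpURLConnection;
import java.net.URL;

public class UnsupportedOpCheck {
  public static void main(String[] args) throws Exception {
    // NOSUCHOP is deliberately not a supported operation.
    URL url = new URL(
        "http://localhost:5978/webhdfs/v1/some/dir?op=NOSUCHOP");
    HttpURLConnection conn = (HttpURLConnection) url.openConnection();
    conn.setRequestMethod("GET");
    conn.connect();
    System.out.println(conn.getResponseCode()); // expect 400 (BAD_REQUEST)
  }
}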

+ 76 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FSImageLoader.java

@@ -31,6 +31,7 @@ import java.util.Map;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.permission.AclEntry;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.fs.permission.PermissionStatus;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
@@ -270,6 +271,81 @@ class FSImageLoader {
     return list;
   }
 
+  /**
+   * Return the JSON formatted ACL status of the specified file.
+   * @param path the path that specifies a file
+   * @return JSON formatted AclStatus
+   * @throws IOException if the AclStatus cannot be serialized to JSON
+   */
+  String getAclStatus(String path) throws IOException {
+    StringBuilder sb = new StringBuilder();
+    List<AclEntry> aclEntryList = getAclEntryList(path);
+    PermissionStatus p = getPermissionStatus(path);
+    sb.append("{\"AclStatus\":{\"entries\":[");
+    int i = 0;
+    for (AclEntry aclEntry : aclEntryList) {
+      if (i++ != 0) {
+        sb.append(',');
+      }
+      sb.append('"');
+      sb.append(aclEntry.toString());
+      sb.append('"');
+    }
+    sb.append("],\"group\": \"");
+    sb.append(p.getGroupName());
+    sb.append("\",\"owner\": \"");
+    sb.append(p.getUserName());
+    sb.append("\",\"stickyBit\": ");
+    sb.append(p.getPermission().getStickyBit());
+    sb.append("}}\n");
+    return sb.toString();
+  }
+
+  private List<AclEntry> getAclEntryList(String path) {
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        return FSImageFormatPBINode.Loader.loadAclEntries(
+            f.getAcl(), stringTable);
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        return FSImageFormatPBINode.Loader.loadAclEntries(
+            d.getAcl(), stringTable);
+      }
+      default: {
+        return new ArrayList<AclEntry>();
+      }
+    }
+  }
+
+  private PermissionStatus getPermissionStatus(String path) {
+    long id = getINodeId(path);
+    FsImageProto.INodeSection.INode inode = inodes.get(id);
+    switch (inode.getType()) {
+      case FILE: {
+        FsImageProto.INodeSection.INodeFile f = inode.getFile();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            f.getPermission(), stringTable);
+      }
+      case DIRECTORY: {
+        FsImageProto.INodeSection.INodeDirectory d = inode.getDirectory();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            d.getPermission(), stringTable);
+      }
+      case SYMLINK: {
+        FsImageProto.INodeSection.INodeSymlink s = inode.getSymlink();
+        return FSImageFormatPBINode.Loader.loadPermission(
+            s.getPermission(), stringTable);
+      }
+      default: {
+        return null;
+      }
+    }
+  }
+
  /**
   * Return the INodeId of the specified path.
    */
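
Because getAclStatus above assembles the JSON by hand with a StringBuilder, the response body has exactly the shape those string literals produce, uneven spacing after the colons included. An illustrative response for a file with one named-user entry and one group entry, where the entry strings come from AclEntry.toString() and the owner and group names are invented:

{"AclStatus":{"entries":["user:foo:rw-","group::r--"],"group": "supergroup","owner": "hdfs","stickyBit": false}}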

+ 206 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/TestOfflineImageViewerForAcl.java

@@ -0,0 +1,206 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs.tools.offlineImageViewer;
+
+import java.io.File;
+import java.io.IOException;
+import java.net.HttpURLConnection;
+import java.net.URI;
+import java.net.URISyntaxException;
+import java.net.URL;
+import java.util.HashMap;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.AclStatus;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
+import org.apache.hadoop.hdfs.server.namenode.FSImageTestUtil;
+import org.apache.hadoop.hdfs.web.WebHdfsFileSystem;
+import org.apache.hadoop.net.NetUtils;
+import org.junit.AfterClass;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+import static org.apache.hadoop.fs.permission.AclEntryScope.ACCESS;
+import static org.apache.hadoop.fs.permission.AclEntryScope.DEFAULT;
+import static org.apache.hadoop.fs.permission.AclEntryType.GROUP;
+import static org.apache.hadoop.fs.permission.AclEntryType.OTHER;
+import static org.apache.hadoop.fs.permission.AclEntryType.USER;
+import static org.apache.hadoop.fs.permission.FsAction.ALL;
+import static org.apache.hadoop.fs.permission.FsAction.READ;
+import static org.apache.hadoop.fs.permission.FsAction.READ_WRITE;
+import static org.apache.hadoop.fs.permission.FsAction.READ_EXECUTE;
+import static org.apache.hadoop.fs.permission.FsAction.NONE;
+import static org.apache.hadoop.hdfs.server.namenode.AclTestHelpers.aclEntry;
+import static org.junit.Assert.assertEquals;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+
+/**
+ * Tests OfflineImageViewer when the input fsimage has HDFS ACLs.
+ */
+public class TestOfflineImageViewerForAcl {
+
+  private static final Log LOG =
+      LogFactory.getLog(TestOfflineImageViewerForAcl.class);
+
+  private static File originalFsimage = null;
+
+  // ACLs as set in HDFS, to be compared against the viewer's output
+  final static HashMap<String, AclStatus> writtenAcls = Maps.newHashMap();
+
+  /**
+   * Create a populated namespace for later testing. Save its contents to a
+   * data structure and store its fsimage location.
+   * We only want to generate the fsimage file once and use it for
+   * multiple tests.
+   */
+  @BeforeClass
+  public static void createOriginalFSImage() throws IOException {
+    MiniDFSCluster cluster = null;
+    try {
+      Configuration conf = new Configuration();
+      conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_ACLS_ENABLED_KEY, true);
+      cluster = new MiniDFSCluster.Builder(conf).build();
+      cluster.waitActive();
+      DistributedFileSystem hdfs = cluster.getFileSystem();
+
+      // Create a reasonable namespace with ACLs
+      Path dir = new Path("/dirWithNoAcl");
+      hdfs.mkdirs(dir);
+      writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
+
+      dir = new Path("/dirWithDefaultAcl");
+      hdfs.mkdirs(dir);
+      hdfs.setAcl(dir, Lists.newArrayList(
+          aclEntry(DEFAULT, USER, ALL),
+          aclEntry(DEFAULT, USER, "foo", ALL),
+          aclEntry(DEFAULT, GROUP, READ_EXECUTE),
+          aclEntry(DEFAULT, OTHER, NONE)));
+      writtenAcls.put(dir.toString(), hdfs.getAclStatus(dir));
+
+      Path file = new Path("/noAcl");
+      FSDataOutputStream o = hdfs.create(file);
+      o.write(23);
+      o.close();
+      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
+
+      file = new Path("/withAcl");
+      o = hdfs.create(file);
+      o.write(23);
+      o.close();
+      hdfs.setAcl(file, Lists.newArrayList(
+          aclEntry(ACCESS, USER, READ_WRITE),
+          aclEntry(ACCESS, USER, "foo", READ),
+          aclEntry(ACCESS, GROUP, READ),
+          aclEntry(ACCESS, OTHER, NONE)));
+      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
+
+      file = new Path("/withSeveralAcls");
+      o = hdfs.create(file);
+      o.write(23);
+      o.close();
+      hdfs.setAcl(file, Lists.newArrayList(
+          aclEntry(ACCESS, USER, READ_WRITE),
+          aclEntry(ACCESS, USER, "foo", READ_WRITE),
+          aclEntry(ACCESS, USER, "bar", READ),
+          aclEntry(ACCESS, GROUP, READ),
+          aclEntry(ACCESS, GROUP, "group", READ),
+          aclEntry(ACCESS, OTHER, NONE)));
+      writtenAcls.put(file.toString(), hdfs.getAclStatus(file));
+
+      // Write results to the fsimage file
+      hdfs.setSafeMode(HdfsConstants.SafeModeAction.SAFEMODE_ENTER, false);
+      hdfs.saveNamespace();
+
+      // Determine the location of the fsimage file
+      originalFsimage = FSImageTestUtil.findLatestImageFile(FSImageTestUtil
+          .getFSImage(cluster.getNameNode()).getStorage().getStorageDir(0));
+      if (originalFsimage == null) {
+        throw new RuntimeException("Didn't generate or can't find fsimage");
+      }
+      LOG.debug("original FS image file is " + originalFsimage);
+    } finally {
+      if (cluster != null)
+        cluster.shutdown();
+    }
+  }
+
+  @AfterClass
+  public static void deleteOriginalFSImage() throws IOException {
+    if (originalFsimage != null && originalFsimage.exists()) {
+      originalFsimage.delete();
+    }
+  }
+
+  @Test
+  public void testWebImageViewerForAcl() throws IOException,
+      InterruptedException, URISyntaxException {
+    WebImageViewer viewer = new WebImageViewer(
+        NetUtils.createSocketAddr("localhost:0"));
+    try {
+      viewer.initServer(originalFsimage.getAbsolutePath());
+      int port = viewer.getPort();
+
+      // create a WebHdfsFileSystem instance
+      URI uri = new URI("webhdfs://localhost:" + String.valueOf(port));
+      Configuration conf = new Configuration();
+      WebHdfsFileSystem webhdfs = (WebHdfsFileSystem)FileSystem.get(uri, conf);
+
+      // GETACLSTATUS operation to a directory without ACL
+      AclStatus acl = webhdfs.getAclStatus(new Path("/dirWithNoAcl"));
+      assertEquals(writtenAcls.get("/dirWithNoAcl"), acl);
+
+      // GETACLSTATUS operation to a directory with a default ACL
+      acl = webhdfs.getAclStatus(new Path("/dirWithDefaultAcl"));
+      assertEquals(writtenAcls.get("/dirWithDefaultAcl"), acl);
+
+      // GETACLSTATUS operation to a file without ACL
+      acl = webhdfs.getAclStatus(new Path("/noAcl"));
+      assertEquals(writtenAcls.get("/noAcl"), acl);
+
+      // GETACLSTATUS operation to a file with an ACL
+      acl = webhdfs.getAclStatus(new Path("/withAcl"));
+      assertEquals(writtenAcls.get("/withAcl"), acl);
+
+      // GETACLSTATUS operation to a file with several ACL entries
+      acl = webhdfs.getAclStatus(new Path("/withSeveralAcls"));
+      assertEquals(writtenAcls.get("/withSeveralAcls"), acl);
+
+      // GETACLSTATUS operation to an invalid path
+      URL url = new URL("http://localhost:" + port +
+          "/webhdfs/v1/invalid/?op=GETACLSTATUS");
+      HttpURLConnection connection = (HttpURLConnection) url.openConnection();
+      connection.setRequestMethod("GET");
+      connection.connect();
+      assertEquals(HttpURLConnection.HTTP_NOT_FOUND,
+          connection.getResponseCode());
+    } finally {
+      // shutdown the viewer
+      viewer.shutdown();
+    }
+  }
+}