HDFS-15052. WebHDFS getTrashRoot leads to OOM due to FileSystem object creation. (#1758)

(cherry picked from commit 2338d25dc7150d75fbda84cc95422380b5622224)

 Conflicts:
        hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

(cherry picked from commit 610805ec7245769aebb36e52725522c42cb3dd88)
Masatake Iwasaki 5 years ago
parent
commit
8095e3b149
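
Why this caused an OOM: the previous NamenodeWebHdfsMethods#getTrashRoot (last hunk below) called FileSystem.get() on every GETTRASHROOT request. FileSystem caches instances keyed on scheme, authority, and UserGroupInformation, and since each WebHDFS request can run under a distinct UGI, each request could allocate a new DistributedFileSystem inside the NameNode that the cache then retained indefinitely. A minimal sketch of that leak pattern, with an illustrative class name and loop (not from the patch):

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.security.UserGroupInformation;

public class FsCacheLeakSketch {
  public static void main(String[] args) throws Exception {
    final Configuration conf = new Configuration();
    for (int i = 0; i < 100000; i++) {
      UserGroupInformation ugi =
          UserGroupInformation.createRemoteUser("user" + i);
      ugi.doAs((PrivilegedExceptionAction<Void>) () -> {
        // The cache key includes the current UGI, so every distinct user
        // creates and retains a new FileSystem instance; nothing closes it.
        FileSystem.get(conf);
        return null;
      });
    }
  }
}

The patch below computes the trash root from plain strings and the existing RPC client protocol instead of instantiating a FileSystem at all.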

+ 28 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSUtilClient.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.hdfs.net.Peer;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -1003,7 +1004,7 @@ public class DFSUtilClient {
    * @param ugi {@link UserGroupInformation} of current user.
    * @return the home directory of current user.
    */
-  public static Path getHomeDirectory(Configuration conf,
+  public static String getHomeDirectory(Configuration conf,
       UserGroupInformation ugi) {
     String userHomePrefix = HdfsClientConfigKeys
         .DFS_USER_HOME_DIR_PREFIX_DEFAULT;
@@ -1012,6 +1013,31 @@ public class DFSUtilClient {
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_KEY,
           HdfsClientConfigKeys.DFS_USER_HOME_DIR_PREFIX_DEFAULT);
     }
-    return new Path(userHomePrefix + "/" + ugi.getShortUserName());
+    return userHomePrefix + Path.SEPARATOR + ugi.getShortUserName();
+  }
+
+  /**
+   * Returns trash root in non-encryption zone.
+   * @param conf configuration.
+   * @param ugi UserGroupInformation of the trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getTrashRoot(Configuration conf,
+      UserGroupInformation ugi) {
+    return getHomeDirectory(conf, ugi)
+        + Path.SEPARATOR + FileSystem.TRASH_PREFIX;
+  }
+
+  /**
+   * Returns trash root in encryption zone.
+   * @param ez encryption zone.
+   * @param ugi UserGroupInformation of the trash owner.
+   * @return unqualified path of trash root.
+   */
+  public static String getEZTrashRoot(EncryptionZone ez,
+      UserGroupInformation ugi) {
+    String ezpath = ez.getPath();
+    return (ezpath.equals("/") ? ezpath : ezpath + Path.SEPARATOR)
+        + FileSystem.TRASH_PREFIX + Path.SEPARATOR + ugi.getShortUserName();
   }
 }
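
A short usage sketch of the new string-based helpers (user and zone names are illustrative; ez is an EncryptionZone previously obtained from the client protocol):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtilClient;
import org.apache.hadoop.hdfs.protocol.EncryptionZone;
import org.apache.hadoop.security.UserGroupInformation;

public class TrashRootUsageSketch {
  static void demo(Configuration conf, EncryptionZone ez) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    // With the default home-dir prefix, e.g. "/user/alice"
    String home = DFSUtilClient.getHomeDirectory(conf, ugi);
    // Outside any encryption zone, e.g. "/user/alice/.Trash"
    String trash = DFSUtilClient.getTrashRoot(conf, ugi);
    // Inside a zone rooted at /zone1, e.g. "/zone1/.Trash/alice";
    // a zone rooted at "/" yields "/.Trash/alice", not "//.Trash/alice"
    String ezTrash = DFSUtilClient.getEZTrashRoot(ez, ugi);
  }
}

Returning unqualified strings keeps these helpers usable on the NameNode side without constructing Path or FileSystem objects; callers qualify the result where needed.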

+ 6 - 5
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -210,7 +210,8 @@ public class DistributedFileSystem extends FileSystem
 
   @Override
   public Path getHomeDirectory() {
-    return makeQualified(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi));
+    return makeQualified(
+        new Path(DFSUtilClient.getHomeDirectory(getConf(), dfs.ugi)));
   }
 
   /**
@@ -3070,8 +3071,7 @@ public class DistributedFileSystem extends FileSystem
       EncryptionZone ez = dfs.getEZForPath(parentSrc);
       if ((ez != null)) {
         return this.makeQualified(
-            new Path(new Path(ez.getPath(), FileSystem.TRASH_PREFIX),
-                dfs.ugi.getShortUserName()));
+            new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi)));
       }
     } catch (IOException e) {
       DFSClient.LOG.warn("Exception in checking the encryption zone for the " +
@@ -3098,7 +3098,8 @@ public class DistributedFileSystem extends FileSystem
       // Get EZ Trash roots
       final RemoteIterator<EncryptionZone> it = dfs.listEncryptionZones();
       while (it.hasNext()) {
-        Path ezTrashRoot = new Path(it.next().getPath(),
+        EncryptionZone ez = it.next();
+        Path ezTrashRoot = new Path(ez.getPath(),
             FileSystem.TRASH_PREFIX);
         if (!exists(ezTrashRoot)) {
           continue;
@@ -3110,7 +3111,7 @@ public class DistributedFileSystem extends FileSystem
             }
           }
         } else {
-          Path userTrash = new Path(ezTrashRoot, dfs.ugi.getShortUserName());
+          Path userTrash = new Path(DFSUtilClient.getEZTrashRoot(ez, dfs.ugi));
           try {
             ret.add(getFileStatus(userTrash));
           } catch (FileNotFoundException ignored) {
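
Client-visible behavior of getTrashRoot/getTrashRoots is unchanged; the helpers only centralize path construction, which DistributedFileSystem then qualifies. A hedged sketch of the expected results, given a DistributedFileSystem dfs (cluster URI, user, and zone are illustrative):

// Assuming fs.defaultFS = hdfs://nn:8020, current user "alice",
// and /zone1 an encryption zone:
Path p1 = dfs.getTrashRoot(new Path("/outsideEZ/file"));
// -> hdfs://nn:8020/user/alice/.Trash
Path p2 = dfs.getTrashRoot(new Path("/zone1/file"));
// -> hdfs://nn:8020/zone1/.Trash/alice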

+ 36 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/web/resources/NamenodeWebHdfsMethods.java

@@ -63,7 +63,6 @@ import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Options;
 import org.apache.hadoop.fs.XAttr;
@@ -79,6 +78,7 @@ import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
+import org.apache.hadoop.hdfs.protocol.EncryptionZone;
 import org.apache.hadoop.hdfs.protocol.ErasureCodingPolicy;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
@@ -1200,7 +1200,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
     case GETHOMEDIRECTORY: {
-      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi).toString();
+      String userHome = DFSUtilClient.getHomeDirectory(conf, ugi);
       final String js = JsonUtil.toJsonString("Path", userHome);
       return Response.ok(js).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1241,7 +1241,7 @@ public class NamenodeWebHdfsMethods {
       return Response.ok().build();
     }
     case GETTRASHROOT: {
-      final String trashPath = getTrashRoot(fullpath, conf);
+      final String trashPath = getTrashRoot(conf, fullpath);
       final String jsonStr = JsonUtil.toJsonString("Path", trashPath);
       return Response.ok(jsonStr).type(MediaType.APPLICATION_JSON).build();
     }
@@ -1301,11 +1301,39 @@ public class NamenodeWebHdfsMethods {
     }
   }
 
-  private static String getTrashRoot(String fullPath,
-      Configuration conf) throws IOException {
-    FileSystem fs = FileSystem.get(conf != null ? conf : new Configuration());
-    return fs.getTrashRoot(
-        new org.apache.hadoop.fs.Path(fullPath)).toUri().getPath();
+  private String getTrashRoot(Configuration conf, String fullPath)
+      throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    String parentSrc = getParent(fullPath);
+    EncryptionZone ez = getRpcClientProtocol().getEZForPath(
+        parentSrc != null ? parentSrc : fullPath);
+    String trashRoot;
+    if (ez != null) {
+      trashRoot = DFSUtilClient.getEZTrashRoot(ez, ugi);
+    } else {
+      trashRoot = DFSUtilClient.getTrashRoot(conf, ugi);
+    }
+    return trashRoot;
+  }
+
+  /**
+   * Returns the parent of a path in the same way as Path#getParent.
+   * @return the parent of the path, or null if the path is empty or at the root
+   */
+  public String getParent(String path) {
+    int lastSlash = path.lastIndexOf('/');
+    int start = 0;
+    if ((path.length() == start) || // empty path
+        (lastSlash == start && path.length() == start + 1)) { // at root
+      return null;
+    }
+    String parent;
+    if (lastSlash == -1) {
+      parent = org.apache.hadoop.fs.Path.CUR_DIR;
+    } else {
+      parent = path.substring(0, lastSlash == start ? start + 1 : lastSlash);
+    }
+    return parent;
   }
 
   private static DirectoryListing getDirectoryListing(final ClientProtocol cp,
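
The new getParent mirrors org.apache.hadoop.fs.Path#getParent on raw strings, which lets getTrashRoot resolve the enclosing encryption zone over RPC without creating a Path or a FileSystem. Expected results, derived from the logic above:

getParent("/foo/bar")  // -> "/foo"
getParent("/foo")      // -> "/"
getParent("/")         // -> null  (at root)
getParent("")          // -> null  (empty path)
getParent("foo")       // -> "."   (Path.CUR_DIR)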

+ 53 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHDFS.java

@@ -34,6 +34,7 @@ import static org.junit.Assert.assertTrue;
 import static org.junit.Assert.fail;
 
 import java.io.EOFException;
+import java.io.File;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -48,6 +49,7 @@ import java.nio.charset.StandardCharsets;
 import java.security.PrivilegedExceptionAction;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.Iterator;
 import java.util.Map;
 import java.util.Random;
@@ -61,11 +63,13 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FsServerDefaults;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RemoteIterator;
@@ -84,6 +88,8 @@ import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.TestDFSClientRetries;
 import org.apache.hadoop.hdfs.TestFileCreation;
+import org.apache.hadoop.hdfs.client.CreateEncryptionZoneFlag;
+import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
@@ -1637,6 +1643,53 @@ public class TestWebHDFS {
     }
   }
 
+  @Test
+  public void testGetEZTrashRoot() throws Exception {
+    final Configuration conf = WebHdfsTestUtil.createConf();
+    FileSystemTestHelper fsHelper = new FileSystemTestHelper();
+    File testRootDir = new File(fsHelper.getTestRootDir()).getAbsoluteFile();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        "jceks://file" + new Path(testRootDir.toString(), "test.jks").toUri());
+    final MiniDFSCluster cluster =
+        new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
+    cluster.waitActive();
+    DistributedFileSystem dfs = cluster.getFileSystem();
+    final WebHdfsFileSystem webhdfs = WebHdfsTestUtil.getWebHdfsFileSystem(
+        conf, WebHdfsConstants.WEBHDFS_SCHEME);
+    HdfsAdmin dfsAdmin = new HdfsAdmin(cluster.getURI(), conf);
+    dfs.getClient().setKeyProvider(
+        cluster.getNameNode().getNamesystem().getProvider());
+    final String testkey = "test_key";
+    DFSTestUtil.createKey(testkey, cluster, conf);
+
+    final Path zone1 = new Path("/zone1");
+    dfs.mkdirs(zone1, new FsPermission(700));
+    dfsAdmin.createEncryptionZone(zone1, testkey,
+        EnumSet.of(CreateEncryptionZoneFlag.PROVISION_TRASH));
+
+    final Path insideEZ = new Path(zone1, "insideEZ");
+    dfs.mkdirs(insideEZ, new FsPermission(700));
+    assertEquals(
+        dfs.getTrashRoot(insideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(insideEZ).toUri().getPath());
+
+    final Path outsideEZ = new Path("/outsideEZ");
+    dfs.mkdirs(outsideEZ, new FsPermission(755));
+    assertEquals(
+        dfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath());
+
+    final Path root = new Path("/");
+    assertEquals(
+        dfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(root).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(root).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+    assertEquals(
+        webhdfs.getTrashRoot(outsideEZ).toUri().getPath(),
+        webhdfs.getTrashRoot(zone1).toUri().getPath());
+  }
 
   @Test
   public void testStoragePolicy() throws Exception {