
HDFS-11687. Add new public encryption APIs required by Hive. (lei)

Change-Id: I4a23a00de63ad18022312ceb1f306a87d032d07c
Lei Xu 8 years ago
parent
commit
25f5d9ad5e

+ 2 - 16
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -2951,24 +2951,10 @@ public class DFSClient implements java.io.Closeable, RemotePeerFactory,
 
   /**
    * Probe for encryption enabled on this filesystem.
-   * Note (see HDFS-11689):
-   * Not to throw exception in this method since it would break hive.
-   * Hive accesses this method and assumes no exception would be thrown.
-   * Hive should not access DFSClient since it is InterfaceAudience.Private.
-   * Deprecated annotation is added to trigger build warning at hive side.
-   * Request has been made to Hive to remove access to DFSClient.
    * @return true if encryption is enabled
    */
-  @Deprecated
-  public boolean isHDFSEncryptionEnabled() {
-    boolean result = false;
-    try {
-      result = (getKeyProviderUri() != null);
-    } catch (IOException ioe) {
-      DFSClient.LOG.warn("Exception while checking whether encryption zone "
-            + "is supported, assumes it is not supported", ioe);
-    }
-    return result;
+  boolean isHDFSEncryptionEnabled() throws IOException {
+    return getKeyProviderUri() != null;
   }
 
   /**

+ 7 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -2587,8 +2587,13 @@ public class DistributedFileSystem extends FileSystem {
    */
   @Override
   public Path getTrashRoot(Path path) {
-    if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
-      return super.getTrashRoot(path);
+    try {
+      if ((path == null) || !dfs.isHDFSEncryptionEnabled()) {
+        return super.getTrashRoot(path);
+      }
+    } catch (IOException ioe) {
+      DFSClient.LOG.warn("Exception while checking whether encryption zone is "
+          + "supported", ioe);
     }
 
     String parentSrc = path.isRoot()?

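For context on the catch block above: FileSystem#getTrashRoot declares no checked exception, so the IOException now thrown by the probe has to be handled inside the override rather than propagated. A minimal sketch of the inherited default, paraphrased from memory (treat the exact body as an assumption):

// org.apache.hadoop.fs.FileSystem (sketch): the base method throws no
// checked exception, which is why DistributedFileSystem cannot simply
// rethrow the IOException raised by the key-provider probe.
public Path getTrashRoot(Path path) {
  return this.makeQualified(
      new Path(getHomeDirectory().toUri().getPath(), TRASH_PREFIX));
}

If the probe fails, the override logs a warning and falls through to the encryption-zone-aware resolution (the parentSrc logic above), so callers never see the exception.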
+ 12 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsAdmin.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.KeyProvider;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
@@ -267,6 +268,17 @@ public class HdfsAdmin {
     return dfs.listCachePools();
   }
 
+  /**
+   * Get KeyProvider if present.
+   *
+   * @return the key provider if encryption is enabled on HDFS.
+   *         Otherwise, it returns null.
+   * @throws IOException on RPC exception to the NN.
+   */
+  public KeyProvider getKeyProvider() throws IOException {
+    return dfs.getClient().getKeyProvider();
+  }
+
   /**
    * Create an encryption zone rooted at an empty existing directory, using the
    * specified encryption key. An encryption zone has an associated encryption

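The getKeyProvider() method above is the public replacement for the InterfaceAudience.Private DFSClient access that HDFS-11689 flagged on the Hive side. A minimal sketch of how a downstream client might probe for encryption through it (the helper class and method names here are hypothetical, not part of this patch):

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.client.HdfsAdmin;

// Hypothetical downstream helper: replaces calls to the formerly public,
// deprecated DFSClient#isHDFSEncryptionEnabled with the new public API.
public class HdfsEncryptionCheck {
  public static boolean isEncryptionEnabled(Configuration conf)
      throws IOException {
    HdfsAdmin admin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
    // getKeyProvider() returns null when no key provider is configured,
    // i.e. encryption is not enabled on this cluster.
    return admin.getKeyProvider() != null;
  }
}

Unlike the old DFSClient method, this surfaces the IOException from the NameNode RPC instead of swallowing it, leaving the caller to decide how to degrade.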
+ 1 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestEncryptionZones.java

@@ -349,6 +349,7 @@ public class TestEncryptionZones {
   @Test
   public void testBasicOperations() throws Exception {
 
+    assertNotNull("key provider is not present", dfsAdmin.getKeyProvider());
     int numZones = 0;
     /* Number of EZs should be 0 if no EZ is created */
     assertEquals("Unexpected number of encryption zones!", numZones,

+ 33 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHdfsAdmin.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
+import java.io.File;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -27,8 +28,11 @@ import java.util.HashSet;
 import java.util.Set;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.crypto.key.JavaKeyStoreProvider;
 import org.apache.hadoop.fs.BlockStoragePolicySpi;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.client.HdfsAdmin;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
@@ -172,4 +176,33 @@ public class TestHdfsAdmin {
     Assert.assertTrue(
         Sets.difference(policyNamesSet2, policyNamesSet1).isEmpty());
   }
+
+  private static String getKeyProviderURI() {
+    FileSystemTestHelper helper = new FileSystemTestHelper();
+    // Set up java key store
+    String testRoot = helper.getTestRootDir();
+    File testRootDir = new File(testRoot).getAbsoluteFile();
+    return JavaKeyStoreProvider.SCHEME_NAME + "://file" +
+        new Path(testRootDir.toString(), "test.jks").toUri();
+  }
+
+  @Test
+  public void testGetKeyProvider() throws IOException {
+    HdfsAdmin hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+    Assert.assertNull("should return null for a non-encrypted cluster",
+        hdfsAdmin.getKeyProvider());
+
+    shutDownCluster();
+
+    Configuration conf = new Configuration();
+    conf.set(CommonConfigurationKeysPublic.HADOOP_SECURITY_KEY_PROVIDER_PATH,
+        getKeyProviderURI());
+
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
+    cluster.waitActive();
+    hdfsAdmin = new HdfsAdmin(FileSystem.getDefaultUri(conf), conf);
+
+    Assert.assertNotNull("should not return null for an encrypted cluster",
+        hdfsAdmin.getKeyProvider());
+  }
 }
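Beyond the null check exercised by the tests, the returned handle is a full org.apache.hadoop.crypto.key.KeyProvider, so key metadata can be inspected directly. A short usage sketch (the key name "mykey" is an illustrative assumption):

// Assumes an encrypted cluster and an existing key named "mykey".
KeyProvider provider = hdfsAdmin.getKeyProvider();
if (provider != null) {
  // List the key names the provider knows about.
  for (String name : provider.getKeys()) {
    System.out.println("key: " + name);
  }
  // Fetch metadata (cipher, bit length, ...) for a single key.
  KeyProvider.Metadata meta = provider.getMetadata("mykey");
  System.out.println("cipher: " + meta.getCipher());
}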