Browse source

HDFS-13423. Ozone: Clean-up of ozone related change from hadoop-hdfs-project. Contributed by Nanda Kumar.

Mukul Kumar Singh 7 years ago
parent
commit
979bbb4019
29 changed files with 108 additions and 111 deletions
  1. +46 -0   hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java
  2. +1 -2    hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java
  3. +1 -2    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java
  4. +0 -4    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java
  5. +1 -1    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  6. +1 -1    hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java
  7. +0 -8    hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto
  8. +3 -3    hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java
  9. +1 -18   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java
  10. +19 -3  hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java
  11. +1 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java
  12. +2 -6   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java
  13. +1 -2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java
  14. +1 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
  15. +1 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
  16. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java
  17. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java
  18. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java
  19. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java
  20. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java
  21. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java
  22. +3 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java
  23. +3 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java
  24. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
  25. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
  26. +2 -2   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
  27. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java
  28. +2 -1   hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java
  29. +1 -44  hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java

+ 46 - 0
hadoop-hdds/common/src/main/java/org/apache/hadoop/hdds/HddsUtils.java

@@ -22,18 +22,26 @@ import com.google.common.base.Optional;
 import com.google.common.base.Strings;
 import com.google.common.net.HostAndPort;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.net.InetSocketAddress;
+import java.net.UnknownHostException;
 import java.nio.file.Paths;
 import java.util.Collection;
 import java.util.HashSet;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+    .DFS_DATANODE_DNS_INTERFACE_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys
+    .DFS_DATANODE_DNS_NAMESERVER_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
 
@@ -269,4 +277,42 @@ public class HddsUtils {
     }
     return dataNodeIDPath;
   }
+
+  /**
+   * Returns the hostname for this datanode. If the hostname is not
+   * explicitly configured in the given config, then it is determined
+   * via the DNS class.
+   *
+   * @param conf Configuration
+   *
+   * @return the hostname (NB: may not be a FQDN)
+   * @throws UnknownHostException if the dfs.datanode.dns.interface
+   *    option is used and the hostname can not be determined
+   */
+  public static String getHostName(Configuration conf)
+      throws UnknownHostException {
+    String name = conf.get(DFS_DATANODE_HOST_NAME_KEY);
+    if (name == null) {
+      String dnsInterface = conf.get(
+          CommonConfigurationKeys.HADOOP_SECURITY_DNS_INTERFACE_KEY);
+      String nameServer = conf.get(
+          CommonConfigurationKeys.HADOOP_SECURITY_DNS_NAMESERVER_KEY);
+      boolean fallbackToHosts = false;
+
+      if (dnsInterface == null) {
+        // Try the legacy configuration keys.
+        dnsInterface = conf.get(DFS_DATANODE_DNS_INTERFACE_KEY);
+        nameServer = conf.get(DFS_DATANODE_DNS_NAMESERVER_KEY);
+      } else {
+        // If HADOOP_SECURITY_DNS_* is set then also attempt hosts file
+        // resolution if DNS fails. We will not use hosts file resolution
+        // by default to avoid breaking existing clusters.
+        fallbackToHosts = true;
+      }
+
+      name = DNS.getDefaultHost(dnsInterface, nameServer, fallbackToHosts);
+    }
+    return name;
+  }
+
 }
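
The new HddsUtils.getHostName mirrors the logic that previously lived in DataNode: an explicitly configured dfs.datanode.hostname wins outright, and only when it is absent does resolution fall through to DNS.getDefaultHost using the HADOOP_SECURITY_DNS_* keys or, failing that, the legacy dfs.datanode.dns.* keys. A minimal usage sketch (the hostname value is a made-up example, not from this commit):

    import java.net.UnknownHostException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdds.HddsUtils;

    public class HostNameCheck {
      public static void main(String[] args) throws UnknownHostException {
        Configuration conf = new Configuration();
        // An explicit hostname short-circuits DNS resolution entirely.
        conf.set("dfs.datanode.hostname", "dn1.example.com");
        System.out.println(HddsUtils.getHostName(conf)); // dn1.example.com

        // Without it, the helper consults the HADOOP_SECURITY_DNS_* keys
        // first, then the legacy dfs.datanode.dns.* keys, via
        // DNS.getDefaultHost(...).
        conf.unset("dfs.datanode.hostname");
        System.out.println(HddsUtils.getHostName(conf));
      }
    }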

+ 1 - 2
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/HddsDatanodeService.java

@@ -22,7 +22,6 @@ import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configurable;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
@@ -67,7 +66,7 @@ public class HddsDatanodeService implements ServicePlugin {
     }
     if (HddsUtils.isHddsEnabled(conf)) {
       try {
-        String hostname = DataNode.getHostName(conf);
+        String hostname = HddsUtils.getHostName(conf);
         String ip = InetAddress.getByName(hostname).getHostAddress();
         datanodeDetails = initializeDatanodeDetails();
         datanodeDetails.setHostName(hostname);

+ 1 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/HdfsServerConstants.java

@@ -84,8 +84,7 @@ public interface HdfsServerConstants {
   enum NodeType {
     NAME_NODE,
     DATA_NODE,
-    JOURNAL_NODE,
-    STORAGE_CONTAINER_SERVICE
+    JOURNAL_NODE
   }
 
   /** Startup options for rolling upgrade. */

+ 0 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/common/StorageInfo.java

@@ -262,8 +262,4 @@ public class StorageInfo {
     }
     return props;
   }
-
-  public NodeType getNodeType() {
-    return storageType;
-  }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -914,7 +914,7 @@ public class DataNode extends ReconfigurableBase
    * @throws UnknownHostException if the dfs.datanode.dns.interface
    *    option is used and the hostname can not be determined
    */
-  public static String getHostName(Configuration config)
+  private static String getHostName(Configuration config)
       throws UnknownHostException {
     String name = config.get(DFS_DATANODE_HOST_NAME_KEY);
     if (name == null) {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/RestCsrfPreventionFilterHandler.java

@@ -43,7 +43,7 @@ import org.apache.hadoop.security.http.RestCsrfPreventionFilter.HttpInteraction;
  * handler drops the request and immediately sends an HTTP 400 response.
  */
 @InterfaceAudience.Private
-public final class RestCsrfPreventionFilterHandler
+final class RestCsrfPreventionFilterHandler
     extends SimpleChannelInboundHandler<HttpRequest> {
 
   private static final Log LOG = DatanodeHttpServer.LOG;

+ 0 - 8
hadoop-hdfs-project/hadoop-hdfs/src/main/proto/HdfsServer.proto

@@ -187,14 +187,6 @@ message StorageInfoProto {
   required uint32 namespceID = 2;    // File system namespace ID
   required string clusterID = 3;     // ID of the cluster
   required uint64 cTime = 4;         // File system creation time
-
-    enum NodeTypeProto {
-    NAME_NODE = 1;
-    DATA_NODE = 2;
-    JOURNAL_NODE = 3;
-    STORAGE_CONTAINER_SERVICE = 4;
-  }
-  optional NodeTypeProto nodeType = 5;
 }
 
 /**

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFavoredNodesEndToEnd.java

@@ -87,7 +87,7 @@ public class TestFavoredNodesEndToEnd {
     for (int i = 0; i < NUM_FILES; i++) {
       Random rand = new Random(System.currentTimeMillis() + i);
       //pass a new created rand so as to get a uniform distribution each time
-      //without too much collisions (look at the do-while loop in getMembers)
+      //without too much collisions (look at the do-while loop in getDatanodes)
       InetSocketAddress datanode[] = getDatanodes(rand);
       Path p = new Path("/filename"+i);
       FSDataOutputStream out = dfs.create(p, FsPermission.getDefault(), true,
@@ -168,7 +168,7 @@ public class TestFavoredNodesEndToEnd {
     for (int i = 0; i < NUM_FILES; i++) {
       Random rand = new Random(System.currentTimeMillis() + i);
       // pass a new created rand so as to get a uniform distribution each time
-      // without too much collisions (look at the do-while loop in getMembers)
+      // without too much collisions (look at the do-while loop in getDatanodes)
       InetSocketAddress datanode[] = getDatanodes(rand);
       Path p = new Path("/filename" + i);
       // create and close the file.
@@ -195,7 +195,7 @@ public class TestFavoredNodesEndToEnd {
     for (int i = 0; i < NUM_FILES; i++) {
       Random rand = new Random(System.currentTimeMillis() + i);
       //pass a new created rand so as to get a uniform distribution each time
-      //without too much collisions (look at the do-while loop in getMembers)
+      //without too much collisions (look at the do-while loop in getDatanodes)
       InetSocketAddress[] dns = getDatanodes(rand);
       Path p = new Path("/filename"+i);
       FSDataOutputStream out =

+ 1 - 18
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneClassicCluster.java

@@ -23,10 +23,8 @@ import com.google.common.base.Preconditions;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.net.NetUtils;
@@ -49,7 +47,6 @@ import org.apache.hadoop.test.GenericTestUtils;
 
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_PLUGINS_KEY;
 
-import org.apache.hadoop.util.ServicePlugin;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 import org.slf4j.event.Level;
@@ -242,7 +239,7 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
     // An Ozone request may originate at any DataNode, so pick one at random.
     int dnIndex = new Random().nextInt(getDataNodes().size());
     String uri = String.format("http://127.0.0.1:%d",
-        getOzoneRestPort(getDataNodes().get(dnIndex)));
+        MiniOzoneTestHelper.getOzoneRestPort(getDataNodes().get(dnIndex)));
     LOG.info("Creating Ozone client to DataNode {} with URI {} and user {}",
         dnIndex, uri, USER_AUTH);
     try {
@@ -339,20 +336,6 @@ public final class MiniOzoneClassicCluster extends MiniDFSCluster
         4 * 1000);
   }
 
-  public static DatanodeDetails getDatanodeDetails(DataNode dataNode) {
-    DatanodeDetails datanodeDetails = null;
-    for (ServicePlugin plugin : dataNode.getPlugins()) {
-      if (plugin instanceof HddsDatanodeService) {
-        datanodeDetails = ((HddsDatanodeService) plugin).getDatanodeDetails();
-      }
-    }
-    return datanodeDetails;
-  }
-
-  public static int getOzoneRestPort(DataNode dataNode) {
-    return getDatanodeDetails(dataNode).getOzoneRestPort();
-  }
-
   /**
    * Builder for configuring the MiniOzoneCluster to run.
    */

+ 19 - 3
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/MiniOzoneTestHelper.java

@@ -25,6 +25,9 @@ import org.apache.hadoop.ozone.container.common.statemachine
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.util.ServicePlugin;
 
+import java.lang.reflect.Field;
+import java.util.List;
+
 /**
  * Stateless helper functions for MiniOzone based tests.
  */
@@ -37,6 +40,10 @@ public class MiniOzoneTestHelper {
     return findHddsPlugin(dataNode).getDatanodeDetails();
   }
 
+  public static int getOzoneRestPort(DataNode dataNode) {
+    return MiniOzoneTestHelper.getDatanodeDetails(dataNode).getOzoneRestPort();
+  }
+
   public static OzoneContainer getOzoneContainer(DataNode dataNode) {
     return findHddsPlugin(dataNode).getDatanodeStateMachine()
         .getContainer();
@@ -52,10 +59,19 @@ public class MiniOzoneTestHelper {
   }
 
   private static HddsDatanodeService findHddsPlugin(DataNode dataNode) {
-    for (ServicePlugin plugin : dataNode.getPlugins()) {
-      if (plugin instanceof HddsDatanodeService) {
-        return (HddsDatanodeService) plugin;
+    try {
+      Field pluginsField = DataNode.class.getDeclaredField("plugins");
+      pluginsField.setAccessible(true);
+      List<ServicePlugin> plugins =
+          (List<ServicePlugin>) pluginsField.get(dataNode);
+
+      for (ServicePlugin plugin : plugins) {
+        if (plugin instanceof HddsDatanodeService) {
+          return (HddsDatanodeService) plugin;
+        }
       }
+    } catch (NoSuchFieldException | IllegalAccessException e) {
+      e.printStackTrace();
     }
     throw new IllegalStateException("Can't find the Hdds server plugin in the"
         + " plugin collection of datanode");

+ 1 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/RatisTestHelper.java

@@ -80,7 +80,7 @@ public interface RatisTestHelper {
     }
 
     public int getDatanodeOzoneRestPort() {
-      return MiniOzoneClassicCluster.getOzoneRestPort(
+      return MiniOzoneTestHelper.getOzoneRestPort(
           cluster.getDataNodes().get(0));
     }
   }

+ 2 - 6
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestMiniOzoneCluster.java

@@ -95,12 +95,8 @@ public class TestMiniOzoneCluster {
     for(DataNode dn : datanodes) {
       // Create a single member pipe line
       String containerName = OzoneUtils.getRequestID();
-      DatanodeDetails datanodeDetails = null;
-      for (ServicePlugin plugin : dn.getPlugins()) {
-        if (plugin instanceof HddsDatanodeService) {
-          datanodeDetails = ((HddsDatanodeService) plugin).getDatanodeDetails();
-        }
-      }
+      DatanodeDetails datanodeDetails =
+          MiniOzoneTestHelper.getDatanodeDetails(dn);
       final PipelineChannel pipelineChannel =
           new PipelineChannel(datanodeDetails.getUuidString(),
               HddsProtos.LifeCycleState.OPEN,

+ 1 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestOzoneHelper.java

@@ -15,11 +15,10 @@
  *  See the License for the specific language governing permissions and
  *  limitations under the License.
  */
-package org.apache.hadoop.ozone.web;
+package org.apache.hadoop.ozone;
 
 import org.apache.hadoop.ozone.web.exceptions.ErrorTable;
 import org.apache.hadoop.ozone.client.rest.headers.Header;
-import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.apache.hadoop.util.Time;
 import org.apache.http.HttpResponse;

+ 1 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java

@@ -170,7 +170,7 @@ public class TestStorageContainerManagerHelper {
   private OzoneContainer getContainerServerByDatanodeUuid(String dnUUID)
       throws IOException {
     for (DataNode dn : cluster.getDataNodes()) {
-      if (MiniOzoneClassicCluster.getDatanodeDetails(dn).getUuidString()
+      if (MiniOzoneTestHelper.getDatanodeDetails(dn).getUuidString()
           .equals(dnUUID)) {
         return MiniOzoneTestHelper.getOzoneContainer(dn);
       }

+ 1 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java

@@ -86,7 +86,7 @@ public class TestCloseContainerHandler {
 
     Assert.assertFalse(isContainerClosed(cluster, containerName));
 
-    DatanodeDetails datanodeDetails = MiniOzoneClassicCluster
+    DatanodeDetails datanodeDetails = MiniOzoneTestHelper
         .getDatanodeDetails(cluster.getDataNodes().get(0));
     //send the order to close the container
     cluster.getStorageContainerManager().getScmNodeManager()

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainerRatis.java

@@ -21,6 +21,7 @@ package org.apache.hadoop.ozone.container.ozoneimpl;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -88,7 +89,7 @@ public class TestOzoneContainerRatis {
       final Pipeline pipeline = ContainerTestHelper.createPipeline(
           containerName,
           CollectionUtils.as(datanodes,
-              MiniOzoneClassicCluster::getDatanodeDetails));
+              MiniOzoneTestHelper::getDatanodeDetails));
       LOG.info("pipeline=" + pipeline);
 
       // Create Ratis cluster

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestRatisManager.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.RatisTestHelper;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
@@ -84,7 +85,7 @@ public class TestRatisManager {
 
       final List<DataNode> datanodes = cluster.getDataNodes();
       final List<DatanodeDetails> datanodeDetailsSet = datanodes.stream()
-          .map(MiniOzoneClassicCluster::getDatanodeDetails).collect(
+          .map(MiniOzoneTestHelper::getDatanodeDetails).collect(
               Collectors.toList());
 
       //final RatisManager manager = RatisManager.newRatisManager(conf);

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestKeySpaceManagerRestInterface.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.ksm.helpers.ServiceInfo;
 import org.apache.hadoop.ozone.protocol.proto
@@ -124,7 +125,7 @@ public class TestKeySpaceManagerRestInterface {
       switch (type) {
       case HTTP:
       case HTTPS:
-        Assert.assertEquals(MiniOzoneClassicCluster.getOzoneRestPort(datanode),
+        Assert.assertEquals(MiniOzoneTestHelper.getOzoneRestPort(datanode),
             (int) ports.get(type));
         break;
       default:

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ozShell/TestOzoneShell.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneAcl;
 import org.apache.hadoop.ozone.OzoneAcl.OzoneACLRights;
 import org.apache.hadoop.ozone.OzoneAcl.OzoneACLType;
@@ -117,7 +118,7 @@ public class TestOzoneShell {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     url = String.format("http://localhost:%d", port);
     client = new OzoneRestClient(String.format("http://localhost:%d", port));
     client.setUserAuth(OzoneConsts.OZONE_SIMPLE_HDFS_USER);

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java

@@ -22,6 +22,7 @@ import org.apache.hadoop.hdds.scm.StorageContainerManager;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
@@ -234,7 +235,7 @@ public class TestSCMCli {
   @Test
   public void testInfoContainer() throws Exception {
     // The cluster has one Datanode server.
-    DatanodeDetails datanodeDetails = MiniOzoneClassicCluster
+    DatanodeDetails datanodeDetails = MiniOzoneTestHelper
         .getDatanodeDetails(cluster.getDataNodes().get(0));
     String formatStr =
         "Container Name: %s\n" +

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/scm/TestSCMMetrics.java

@@ -32,6 +32,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerReport;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
@@ -173,7 +174,7 @@ public class TestSCMMetrics {
       StorageContainerManager scmManager = cluster.getStorageContainerManager();
 
       DataNode dataNode = cluster.getDataNodes().get(0);
-      String datanodeUuid = MiniOzoneClassicCluster.getDatanodeDetails(dataNode)
+      String datanodeUuid = MiniOzoneTestHelper.getDatanodeDetails(dataNode)
           .getUuidString();
       ContainerReportsRequestProto request = createContainerReport(numReport,
           stat, datanodeUuid);

+ 3 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestDistributedOzoneVolumes.java

@@ -19,9 +19,11 @@ package org.apache.hadoop.ozone.web;
 
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestOzoneHelper;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
 import org.junit.Rule;
@@ -67,7 +69,7 @@ public class TestDistributedOzoneVolumes extends TestOzoneHelper {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
   }
 
   /**

+ 3 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestLocalOzoneVolumes.java

@@ -19,9 +19,11 @@ package org.apache.hadoop.ozone.web;
 
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.TestOzoneHelper;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.log4j.Level;
 import org.apache.log4j.Logger;
@@ -70,7 +72,7 @@ public class TestLocalOzoneVolumes extends TestOzoneHelper {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
   }
 
   /**

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.web;
 
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -79,7 +80,7 @@ public class TestOzoneWebAccess {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_LOCAL).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
   }
 
   /**

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.ozone.web.client;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -80,7 +81,7 @@ public class TestBuckets {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     ozoneRestClient = new OzoneRestClient(
         String.format("http://localhost:%d", port));
   }

+ 2 - 2
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java

@@ -111,7 +111,7 @@ public class TestKeys {
     ozoneCluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = ozoneCluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     ozoneRestClient = new OzoneRestClient(
         String.format("http://localhost:%d", port));
     currentTime = Time.now();
@@ -282,7 +282,7 @@ public class TestKeys {
     cluster.restartDataNode(datanodeIdx);
     // refresh the datanode endpoint uri after datanode restart
     DataNode dataNode = cluster.getDataNodes().get(datanodeIdx);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
     client.setEndPoint(String.format("http://localhost:%d", port));
   }
 

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestOzoneClient.java

@@ -45,6 +45,7 @@ import io.netty.handler.logging.LoggingHandler;
 
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -98,7 +99,7 @@ public class TestOzoneClient {
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
     endpoint = String.format("http://localhost:%d",
-        MiniOzoneClassicCluster.getOzoneRestPort(dataNode));
+        MiniOzoneTestHelper.getOzoneRestPort(dataNode));
   }
 
   @AfterClass

+ 2 - 1
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java

@@ -24,6 +24,7 @@ import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.MiniOzoneTestHelper;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.proto.KeySpaceManagerProtocolProtos.Status;
@@ -90,7 +91,7 @@ public class TestVolume {
     cluster = new MiniOzoneClassicCluster.Builder(conf)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED).build();
     DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = MiniOzoneClassicCluster.getOzoneRestPort(dataNode);
+    final int port = MiniOzoneTestHelper.getOzoneRestPort(dataNode);
 
     ozoneRestClient = new OzoneRestClient(
         String.format("http://localhost:%d", port));

+ 1 - 44
hadoop-ozone/objectstore-service/src/main/java/org/apache/hadoop/ozone/web/netty/ObjectStoreRestHttpServer.java

@@ -19,7 +19,6 @@ package org.apache.hadoop.ozone.web.netty;
 
 import javax.servlet.FilterConfig;
 import javax.servlet.ServletContext;
-import javax.servlet.ServletException;
 import java.io.Closeable;
 import java.io.IOException;
 import java.net.BindException;
@@ -33,8 +32,6 @@ import java.util.Map;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
-import org.apache.hadoop.hdfs.server.datanode.web
-    .RestCsrfPreventionFilterHandler;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.security.http.RestCsrfPreventionFilter;
@@ -54,10 +51,6 @@ import io.netty.handler.codec.http.HttpResponseEncoder;
 import io.netty.handler.stream.ChunkedWriteHandler;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .HDDS_REST_CSRF_ENABLED_DEFAULT;
-import static org.apache.hadoop.hdds.scm.ScmConfigKeys
-    .HDDS_REST_CSRF_ENABLED_KEY;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
     .HDDS_REST_HTTP_ADDRESS_DEFAULT;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys
@@ -75,7 +68,6 @@ public class ObjectStoreRestHttpServer implements Closeable {
   private final ServerBootstrap httpServer;
   private final Configuration conf;
   private final Configuration confForCreate;
-  private final RestCsrfPreventionFilter restCsrfPreventionFilter;
   private InetSocketAddress httpAddress;
   static final Log LOG = LogFactory.getLog(ObjectStoreRestHttpServer.class);
   private final ObjectStoreHandler objectStoreHandler;
@@ -83,7 +75,6 @@ public class ObjectStoreRestHttpServer implements Closeable {
   public ObjectStoreRestHttpServer(final Configuration conf,
       final ServerSocketChannel externalHttpChannel,
       ObjectStoreHandler objectStoreHandler) throws IOException {
-    this.restCsrfPreventionFilter = createRestCsrfPreventionFilter(conf);
     this.conf = conf;
 
     this.confForCreate = new Configuration(conf);
@@ -101,11 +92,7 @@ public class ObjectStoreRestHttpServer implements Closeable {
       protected void initChannel(SocketChannel ch) throws Exception {
         ChannelPipeline p = ch.pipeline();
         p.addLast(new HttpRequestDecoder(), new HttpResponseEncoder());
-        if (restCsrfPreventionFilter != null) {
-          p.addLast(
-              new RestCsrfPreventionFilterHandler(restCsrfPreventionFilter));
-        }
-
+        // TODO: add back support for a cross-site request forgery (CSRF)
+        // filter later.
         p.addLast(new ChunkedWriteHandler(), new ObjectStoreURLDispatcher(
             objectStoreHandler.getObjectStoreJerseyContainer()));
       }
@@ -172,36 +159,6 @@ public class ObjectStoreRestHttpServer implements Closeable {
     }
   }
 
-  /**
-   * Creates the {@link RestCsrfPreventionFilter} for the DataNode.  Since the
-   * DataNode HTTP server is not implemented in terms of the servlet API, it
-   * takes some extra effort to obtain an instance of the filter.  This method
-   * takes care of configuration and implementing just enough of the servlet API
-   * and related interfaces so that the DataNode can get a fully initialized
-   * instance of the filter.
-   *
-   * @param conf configuration to read
-   * @return initialized filter, or null if CSRF protection not enabled
-   */
-  private static RestCsrfPreventionFilter createRestCsrfPreventionFilter(
-      Configuration conf) {
-    if (!conf.getBoolean(HDDS_REST_CSRF_ENABLED_KEY,
-        HDDS_REST_CSRF_ENABLED_DEFAULT)) {
-      return null;
-    }
-    String restCsrfClassName = RestCsrfPreventionFilter.class.getName();
-    Map<String, String> restCsrfParams = RestCsrfPreventionFilter
-        .getFilterParams(conf, "dfs.webhdfs.rest-csrf.");
-    RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter();
-    try {
-      filter.init(new MapBasedFilterConfig(restCsrfClassName, restCsrfParams));
-    } catch (ServletException e) {
-      throw new IllegalStateException(
-          "Failed to initialize RestCsrfPreventionFilter.", e);
-    }
-    return filter;
-  }
-
   /**
    * A minimal {@link FilterConfig} implementation backed by a {@link Map}.
    */
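
Since MapBasedFilterConfig stays behind, wiring CSRF protection back in later could follow the same pattern as the removed factory. A hypothetical sketch, reusing the key prefix and calls from the deleted code (not part of this commit):

    // Inside ObjectStoreRestHttpServer, where the private
    // MapBasedFilterConfig class is visible.
    private static RestCsrfPreventionFilter createCsrfFilter(Configuration conf)
        throws ServletException {
      Map<String, String> params = RestCsrfPreventionFilter
          .getFilterParams(conf, "dfs.webhdfs.rest-csrf.");
      RestCsrfPreventionFilter filter = new RestCsrfPreventionFilter();
      filter.init(new MapBasedFilterConfig(
          RestCsrfPreventionFilter.class.getName(), params));
      return filter;
    }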