
HDFS-13446. Ozone: Fix OzoneFileSystem contract test failures. Contributed by Mukul Kumar Singh.

Nanda kumar, 7 years ago
parent
commit
025058f251
23 changed files with 78 additions and 19 deletions
  1. +5 -0   hadoop-hdds/container-service/pom.xml
  2. +7 -0   hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java
  3. +23 -0  hadoop-hdds/container-service/src/test/resources/log4j.properties
  4. +5 -0   hadoop-hdds/server-scm/pom.xml
  5. +1 -1   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java
  6. +2 -2   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java
  7. +1 -1   hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java
  8. +3 -0   hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java
  9. +25 -0  hadoop-tools/hadoop-ozone/pom.xml
  10. +2 -2  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java
  11. +3 -9 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java
  12. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java
  13. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java
  14. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java
  15. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java
  16. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java
  17. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java
  18. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java
  19. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java
  20. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java
  21. +1 -4 (renamed)  hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java
  22. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/resources/contract/ozone.xml
  23. +0 -0 (renamed)  hadoop-tools/hadoop-ozone/src/test/resources/log4j.properties

+ 5 - 0
hadoop-hdds/container-service/pom.xml

@@ -52,6 +52,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <scope>test</scope>
     </dependency>

+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>

   <build>
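Why metrics-core is now needed at test scope is not visible in this diff; presumably a test utility pulls it in transitively. For reference, a minimal sketch of the io.dropwizard.metrics API that this artifact provides (the class and method names below come from the metrics-core library itself):

import com.codahale.metrics.Counter;
import com.codahale.metrics.MetricRegistry;

public class MetricsSketch {
  public static void main(String[] args) {
    // metrics-core registers named metrics in a central registry
    MetricRegistry registry = new MetricRegistry();
    Counter counter = registry.counter("containers.created");
    counter.inc();
    System.out.println(counter.getCount()); // prints 1
  }
}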

+ 7 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/InitDatanodeState.java

@@ -20,6 +20,7 @@ import com.google.common.base.Strings;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.HddsUtils;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.statemachine
     .DatanodeStateMachine;
@@ -107,6 +108,12 @@ public class InitDatanodeState implements DatanodeState,
    */
   private void persistContainerDatanodeDetails() throws IOException {
     String dataNodeIDPath = HddsUtils.getDatanodeIdFilePath(conf);
+    if (Strings.isNullOrEmpty(dataNodeIDPath)) {
+      LOG.error("A valid file path is needed for config setting {}",
+          ScmConfigKeys.OZONE_SCM_DATANODE_ID);
+      this.context.setState(DatanodeStateMachine.DatanodeStates.SHUTDOWN);
+      return;
+    }
     File idPath = new File(dataNodeIDPath);
     DatanodeDetails datanodeDetails = this.context.getParent()
         .getDatanodeDetails();
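The new guard fails fast when the ScmConfigKeys.OZONE_SCM_DATANODE_ID setting is missing: instead of presumably hitting a NullPointerException from new File(null) further down, the state machine logs the problem and moves to SHUTDOWN. A minimal, self-contained sketch of the same pattern (class and field names here are illustrative, not from the patch):

import com.google.common.base.Strings;

public class ConfigGuardSketch {
  enum State { RUNNING, SHUTDOWN }
  private State state = State.RUNNING;

  void persistId(String idFilePath) {
    // Guava's Strings.isNullOrEmpty covers both null and "" in one check
    if (Strings.isNullOrEmpty(idFilePath)) {
      System.err.println("A valid file path is needed for the datanode id");
      state = State.SHUTDOWN; // terminal state instead of an exception
      return;
    }
    // ... persist the datanode id at idFilePath ...
  }
}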

+ 23 - 0
hadoop-hdds/container-service/src/test/resources/log4j.properties

@@ -0,0 +1,23 @@
+#
+#   Licensed to the Apache Software Foundation (ASF) under one or more
+#   contributor license agreements.  See the NOTICE file distributed with
+#   this work for additional information regarding copyright ownership.
+#   The ASF licenses this file to You under the Apache License, Version 2.0
+#   (the "License"); you may not use this file except in compliance with
+#   the License.  You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+#
+# log4j configuration used during build and unit tests
+
+log4j.rootLogger=INFO,stdout
+log4j.threshold=ALL
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n
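This is the stock Hadoop test logging setup: everything at INFO and above goes to the console while the module's tests run. A quick sketch of how test code picks it up (the logger class below is illustrative):

import org.apache.log4j.Logger;

public class LoggingSketch {
  private static final Logger LOG = Logger.getLogger(LoggingSketch.class);

  public static void main(String[] args) {
    LOG.info("reaches stdout through the ConsoleAppender above");
    LOG.debug("suppressed: DEBUG is below the INFO root level");
  }
}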

+ 5 - 0
hadoop-hdds/server-scm/pom.xml

@@ -100,6 +100,11 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>hamcrest-all</artifactId>
       <version>1.3</version>
     </dependency>
+    <dependency>
+      <groupId>org.bouncycastle</groupId>
+      <artifactId>bcprov-jdk16</artifactId>
+      <scope>test</scope>
+    </dependency>
   </dependencies>
   <build>
     <plugins>
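The BouncyCastle artifact is test-only; KeyStoreTestUtil presumably needs it to generate the self-signed certificates used by the SSL test below, though the consumer is not visible in this diff. A minimal sketch of what the artifact enables:

import java.security.Security;
import org.bouncycastle.jce.provider.BouncyCastleProvider;

public class BcProviderSketch {
  public static void main(String[] args) {
    // registering the provider makes the "BC" crypto algorithms resolvable
    Security.addProvider(new BouncyCastleProvider());
    System.out.println(Security.getProvider("BC")); // non-null once registered
  }
}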

+ 1 - 1
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/TestStorageContainerManagerHttpServer.java

@@ -76,7 +76,7 @@ public class TestStorageContainerManagerHttpServer {
     conf = new Configuration();
     keystoresDir = new File(BASEDIR).getAbsolutePath();
     sslConfDir = KeyStoreTestUtil.getClasspathDir(
-        org.apache.hadoop.hdfs.server.namenode.TestNameNodeHttpServer.class);
+        TestStorageContainerManagerHttpServer.class);
     KeyStoreTestUtil.setupSSLConfig(keystoresDir, sslConfDir, conf, false);
     connectionFactory =
         URLConnectionFactory.newDefaultURLConnectionFactory(conf);
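The one-line change matters because KeyStoreTestUtil.getClasspathDir resolves the SSL config directory based on the class it is handed; referencing TestNameNodeHttpServer pointed the test at the HDFS module's build output instead of this module's. A sketch of the underlying mechanism (class names here are illustrative):

import java.net.URL;

public class ClasspathDirSketch {
  public static void main(String[] args) {
    // a class literal anchors resource lookup to the module that compiled it
    URL here = ClasspathDirSketch.class.getResource(
        ClasspathDirSketch.class.getSimpleName() + ".class");
    System.out.println(here); // a path under this module's own classes dir
  }
}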

+ 2 - 2
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/block/TestDeletedBlockLog.java

@@ -262,7 +262,7 @@ public class TestDeletedBlockLog {
     int count = 0;
     String containerName = null;
     DatanodeDetails dnDd1 = DatanodeDetails.newBuilder()
-        .setUuid("node1")
+        .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
         .setContainerPort(0)
@@ -270,7 +270,7 @@ public class TestDeletedBlockLog {
         .setOzoneRestPort(0)
         .build();
     DatanodeDetails dnId2 = DatanodeDetails.newBuilder()
-        .setUuid("node2")
+        .setUuid(UUID.randomUUID().toString())
         .setIpAddress("127.0.0.1")
         .setHostName("localhost")
         .setContainerPort(0)
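DatanodeDetails presumably parses the supplied string into a java.util.UUID, and plain labels like "node1" are not well-formed UUIDs. The JDK behavior driving the fix:

import java.util.UUID;

public class UuidSketch {
  public static void main(String[] args) {
    // a random UUID string always round-trips through fromString
    String valid = UUID.randomUUID().toString();
    System.out.println(UUID.fromString(valid));

    // an arbitrary label does not -- this is why "node1" had to go
    try {
      UUID.fromString("node1");
    } catch (IllegalArgumentException e) {
      System.out.println("not a UUID: " + e.getMessage());
    }
  }
}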

+ 1 - 1
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/hdds/scm/container/MockNodeManager.java

@@ -315,7 +315,7 @@ public class MockNodeManager implements NodeManager {

   // Returns the number of commands that is queued to this node manager.
   public int getCommandCount(DatanodeDetails dd) {
-    List<SCMCommand> list = commandMap.get(dd);
+    List<SCMCommand> list = commandMap.get(dd.getUuid());
     return (list == null) ? 0 : list.size();
   }

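The lookup was using the whole DatanodeDetails object while the map is evidently keyed by the node's UUID, so getCommandCount always missed and reported 0. A self-contained sketch of that key-mismatch bug (types and names are illustrative):

import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.UUID;

public class KeyMismatchSketch {
  public static void main(String[] args) {
    Map<UUID, List<String>> commandMap = new HashMap<>();
    UUID nodeId = UUID.randomUUID();
    commandMap.put(nodeId, Arrays.asList("closeContainer"));

    Object wrongKey = new Object(); // stands in for the DatanodeDetails object
    System.out.println(commandMap.get(wrongKey)); // null -> count reported as 0
    System.out.println(commandMap.get(nodeId));   // [closeContainer] -> real count
  }
}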

+ 3 - 0
hadoop-ozone/client/src/main/java/org/apache/hadoop/ozone/client/io/ChunkGroupOutputStream.java

@@ -294,6 +294,9 @@ public class ChunkGroupOutputStream extends OutputStream {
   @Override
   public synchronized void flush() throws IOException {
     checkNotClosed();
+    if (streamEntries.size() == 0) {
+      return;
+    }
     for (int i = 0; i <= currentStreamIndex; i++) {
       streamEntries.get(i).flush();
     }
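On a stream with no allocated entries, currentStreamIndex presumably still starts at 0, so the flush loop would call streamEntries.get(0) on an empty list and throw IndexOutOfBoundsException; the early return closes that hole. A minimal sketch of the failure mode (the fields are illustrative stand-ins):

import java.util.ArrayList;
import java.util.List;

public class FlushGuardSketch {
  private final List<StringBuilder> streamEntries = new ArrayList<>();
  private int currentStreamIndex = 0;

  void flush() {
    if (streamEntries.size() == 0) {
      return; // nothing buffered yet, nothing to flush
    }
    for (int i = 0; i <= currentStreamIndex; i++) {
      streamEntries.get(i); // would throw on an empty list without the guard
    }
  }

  public static void main(String[] args) {
    new FlushGuardSketch().flush(); // safe: returns early
  }
}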

+ 25 - 0
hadoop-tools/hadoop-ozone/pom.xml

@@ -85,6 +85,31 @@
       <artifactId>hadoop-hdds-common</artifactId>
       <scope>provided</scope>
     </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-scm</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-server-framework</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-ozone-ozone-manager</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-container-service</artifactId>
+      <scope>test</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.hadoop</groupId>
+      <artifactId>hadoop-hdds-client</artifactId>
+      <scope>test</scope>
+    </dependency>
     <dependency>
       <groupId>org.apache.hadoop</groupId>
       <artifactId>hadoop-ozone-common</artifactId>

+ 2 - 2
hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFSInputStream.java

@@ -93,8 +93,8 @@ public class TestOzoneFSInputStream {
     String host = dataNode.getDatanodeHostname();

     // Set the fs.defaultFS and start the filesystem
-    String uri = String.format("%s://%s:%d/%s/%s",
-        Constants.OZONE_URI_SCHEME, host, port, volumeName, bucketName);
+    String uri = String.format("%s://%s.%s/",
+        Constants.OZONE_URI_SCHEME, bucketName, volumeName);
     conf.set(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY, uri);
     fs =  FileSystem.get(conf);
     int fileLen = 100 * 1024 * 1024;
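The filesystem URI moves from a host:port/volume/bucket form to the bucket-dot-volume authority form the contract tests expect. A sketch of the two shapes (assuming Constants.OZONE_URI_SCHEME resolves to the "o3" scheme used by these tests; the host and port values are made up):

public class UriFormSketch {
  public static void main(String[] args) {
    String scheme = "o3"; // assumption: the value behind OZONE_URI_SCHEME
    String volumeName = "volume1";
    String bucketName = "bucket1";

    String oldForm = String.format("%s://%s:%d/%s/%s",
        scheme, "localhost", 9864, volumeName, bucketName);
    String newForm = String.format("%s://%s.%s/",
        scheme, bucketName, volumeName);

    System.out.println(oldForm); // o3://localhost:9864/volume1/bucket1
    System.out.println(newForm); // o3://bucket1.volume1/
  }
}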

+ 3 - 9
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/TestOzoneFileInterfaces.java

@@ -32,14 +32,13 @@ import org.apache.commons.io.IOUtils;
 import org.apache.commons.lang.RandomStringUtils;
 import org.junit.After;

-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -99,7 +98,7 @@ public class TestOzoneFileInterfaces {
   public void init() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
     cluster = new MiniOzoneClassicCluster.Builder(conf)
-        .numDataNodes(10)
+        .numDataNodes(3)
         .setHandlerType(OzoneConsts.OZONE_HANDLER_DISTRIBUTED)
         .build();
     storageHandler =
@@ -119,11 +118,6 @@ public class TestOzoneFileInterfaces {
     BucketArgs bucketArgs = new BucketArgs(volumeName, bucketName, userArgs);
     storageHandler.createBucket(bucketArgs);

-    // Fetch the host and port for File System init
-    DataNode dataNode = cluster.getDataNodes().get(0);
-    int port = dataNode.getInfoPort();
-    String host = dataNode.getDatanodeHostname();
-
     rootPath = String
         .format("%s://%s.%s/", Constants.OZONE_URI_SCHEME, bucketName,
             volumeName);
@@ -147,7 +141,7 @@ public class TestOzoneFileInterfaces {
   public void testFileSystemInit() throws IOException {
     if (setDefaultFs) {
       assertTrue(
-          "The initialized file system is not OzoneFileSysetem but " +
+          "The initialized file system is not OzoneFileSystem but " +
               fs.getClass(),
           fs instanceof OzoneFileSystem);
       assertEquals(Constants.OZONE_URI_SCHEME, fs.getUri().getScheme());

+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractCreate.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDelete.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractDistCp.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractGetFileStatus.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractMkdir.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractOpen.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRename.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractRootDir.java


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/ITestOzoneContractSeek.java


+ 1 - 4
hadoop-tools/hadoop-ozone/src/todo/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java → hadoop-tools/hadoop-ozone/src/test/java/org/apache/hadoop/fs/ozone/contract/OzoneContract.java

@@ -20,12 +20,11 @@ package org.apache.hadoop.fs.ozone.contract;

 import org.apache.commons.lang.RandomStringUtils;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.contract.AbstractFSContract;
 import org.apache.hadoop.fs.ozone.Constants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.ozone.MiniOzoneClassicCluster;
 import org.apache.hadoop.ozone.OzoneConsts;
@@ -105,8 +104,6 @@ class OzoneContract extends AbstractFSContract {
     } catch (OzoneException e) {
       throw new IOException(e.getMessage());
     }
-    DataNode dataNode = cluster.getDataNodes().get(0);
-    final int port = dataNode.getInfoPort();

     String uri = String.format("%s://%s.%s/",
         Constants.OZONE_URI_SCHEME, bucketName, volumeName);

+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/resources/contract/ozone.xml → hadoop-tools/hadoop-ozone/src/test/resources/contract/ozone.xml


+ 0 - 0
hadoop-tools/hadoop-ozone/src/todo/resources/log4j.properties → hadoop-tools/hadoop-ozone/src/test/resources/log4j.properties