
HDDS-183: Integrate VolumeSet, ContainerSet and HddsDispatcher. Contributed by Bharat Viswanadham

Bharat Viswanadham 6 years ago
parent
commit
52d1d9603e
41 changed files with 954 additions and 448 deletions
  1. + 1 - 0
      hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto
  2. + 1 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java
  3. + 50 - 46
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java
  4. + 11 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java
  5. + 19 - 9
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java
  6. + 14 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java
  7. + 8 - 7
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java
  8. + 2 - 2
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java
  9. + 1 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java
  10. + 39 - 17
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java
  11. + 2 - 1
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java
  12. + 32 - 2
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java
  13. + 6 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java
  14. + 48 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java
  15. + 5 - 47
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java
  16. + 15 - 7
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java
  17. + 16 - 6
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java
  18. + 134 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java
  19. + 157 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java
  20. + 166 - 230
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
  21. + 4 - 0
      hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java
  22. + 11 - 2
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java
  23. + 4 - 0
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java
  24. + 1 - 2
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java
  25. + 14 - 11
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java
  26. + 2 - 4
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java
  27. + 3 - 5
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java
  28. + 1 - 2
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java
  29. + 1 - 2
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java
  30. + 6 - 4
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java
  31. + 4 - 4
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java
  32. + 108 - 0
      hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
  33. + 4 - 0
      hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java
  34. + 14 - 6
      hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java
  35. + 7 - 4
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java
  36. + 3 - 3
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java
  37. + 3 - 3
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java
  38. + 7 - 6
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
  39. + 10 - 0
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java
  40. + 7 - 7
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java
  41. + 13 - 9
      hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java

+ 1 - 0
hadoop-hdds/common/src/main/proto/DatanodeContainerProtocol.proto

@@ -137,6 +137,7 @@ enum Result {
   CONTAINER_METADATA_ERROR = 31;
   CONTAINER_FILES_CREATE_ERROR = 32;
   CONTAINER_CHECKSUM_ERROR = 33;
+  UNKNOWN_CONTAINER_TYPE = 34;
 }

 /**

+ 1 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerData.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.container.common.impl;

+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.
     ContainerType;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.

+ 50 - 46
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueYaml.java → hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerDataYaml.java

@@ -16,11 +16,12 @@
  *  limitations under the License.
  */

-package org.apache.hadoop.ozone.container.keyvalue;
+package org.apache.hadoop.ozone.container.common.impl;

 import com.google.common.base.Preconditions;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.yaml.snakeyaml.Yaml;

 import java.beans.IntrospectionException;
@@ -46,13 +47,16 @@ import org.yaml.snakeyaml.nodes.ScalarNode;
 import org.yaml.snakeyaml.nodes.Tag;
 import org.yaml.snakeyaml.representer.Representer;

+import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.YAML_FIELDS;
+import static org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData.YAML_TAG;
+
 /**
  * Class for creating and reading .container files.
  */

-public final class KeyValueYaml {
+public final class ContainerDataYaml {

-  private KeyValueYaml() {
+  private ContainerDataYaml() {

   }
   /**
@@ -62,29 +66,39 @@ public final class KeyValueYaml {
    * @param containerData
    * @throws IOException
    */
-  public static void createContainerFile(File containerFile, ContainerData
-      containerData) throws IOException {
+  public static void createContainerFile(ContainerProtos.ContainerType
+                                             containerType, File containerFile,
+                                         ContainerData containerData) throws
+      IOException {

     Preconditions.checkNotNull(containerFile, "yamlFile cannot be null");
     Preconditions.checkNotNull(containerData, "containerData cannot be null");
+    Preconditions.checkNotNull(containerType, "containerType cannot be null");

     PropertyUtils propertyUtils = new PropertyUtils();
     propertyUtils.setBeanAccess(BeanAccess.FIELD);
     propertyUtils.setAllowReadOnlyProperties(true);

-    Representer representer = new KeyValueContainerDataRepresenter();
-    representer.setPropertyUtils(propertyUtils);
-    representer.addClassTag(
-        KeyValueContainerData.class, new Tag("KeyValueContainerData"));
-
-    Constructor keyValueDataConstructor = new KeyValueDataConstructor();
+    switch(containerType) {
+    case KeyValueContainer:
+      Representer representer = new ContainerDataRepresenter();
+      representer.setPropertyUtils(propertyUtils);
+      representer.addClassTag(KeyValueContainerData.class,
+          KeyValueContainerData.YAML_TAG);

-    Yaml yaml = new Yaml(keyValueDataConstructor, representer);
+      Constructor keyValueDataConstructor = new ContainerDataConstructor();

-    Writer writer = new OutputStreamWriter(new FileOutputStream(containerFile),
-        "UTF-8");
-    yaml.dump(containerData, writer);
-    writer.close();
+      Yaml yaml = new Yaml(keyValueDataConstructor, representer);
+      Writer writer = new OutputStreamWriter(new FileOutputStream(
+          containerFile), "UTF-8");
+      yaml.dump(containerData, writer);
+      writer.close();
+      break;
+    default:
+      throw new StorageContainerException("Unrecognized container Type " +
+          "format " + containerType, ContainerProtos.Result
+          .UNKNOWN_CONTAINER_TYPE);
+    }
   }

   /**
@@ -93,57 +107,53 @@ public final class KeyValueYaml {
    * @param containerFile
    * @throws IOException
    */
-  public static KeyValueContainerData readContainerFile(File containerFile)
+  public static ContainerData readContainerFile(File containerFile)
       throws IOException {
     Preconditions.checkNotNull(containerFile, "containerFile cannot be null");

     InputStream input = null;
-    KeyValueContainerData keyValueContainerData;
+    ContainerData containerData;
     try {
       PropertyUtils propertyUtils = new PropertyUtils();
       propertyUtils.setBeanAccess(BeanAccess.FIELD);
       propertyUtils.setAllowReadOnlyProperties(true);

-      Representer representer = new KeyValueContainerDataRepresenter();
+      Representer representer = new ContainerDataRepresenter();
       representer.setPropertyUtils(propertyUtils);
-      representer.addClassTag(
-          KeyValueContainerData.class, new Tag("KeyValueContainerData"));

-      Constructor keyValueDataConstructor = new KeyValueDataConstructor();
+      Constructor containerDataConstructor = new ContainerDataConstructor();

-      Yaml yaml = new Yaml(keyValueDataConstructor, representer);
+      Yaml yaml = new Yaml(containerDataConstructor, representer);
       yaml.setBeanAccess(BeanAccess.FIELD);

       input = new FileInputStream(containerFile);
-      keyValueContainerData = (KeyValueContainerData)
+      containerData = (ContainerData)
           yaml.load(input);
     } finally {
       if (input!= null) {
         input.close();
       }
     }
-    return keyValueContainerData;
+    return containerData;
   }

   /**
    * Representer class to define which fields need to be stored in yaml file.
    */
-  private static class KeyValueContainerDataRepresenter extends Representer {
+  private static class ContainerDataRepresenter extends Representer {
     @Override
     protected Set<Property> getProperties(Class<? extends Object> type)
         throws IntrospectionException {
       Set<Property> set = super.getProperties(type);
       Set<Property> filtered = new TreeSet<Property>();
+
+      // When a new Container type is added, we need to add what fields need
+      // to be filtered here
       if (type.equals(KeyValueContainerData.class)) {
         // filter properties
         for (Property prop : set) {
           String name = prop.getName();
-          // When a new field needs to be added, it needs to be added here.
-          if (name.equals("containerType") || name.equals("containerId") ||
-              name.equals("layOutVersion") || name.equals("state") ||
-              name.equals("metadata") || name.equals("metadataPath") ||
-              name.equals("chunksPath") || name.equals(
-                  "containerDBType")) {
+          if (YAML_FIELDS.contains(name)) {
             filtered.add(prop);
           }
         }
@@ -155,11 +165,12 @@ public final class KeyValueYaml {
   /**
    * Constructor class for KeyValueData, which will be used by Yaml.
    */
-  private static class KeyValueDataConstructor extends Constructor {
-    KeyValueDataConstructor() {
+  private static class ContainerDataConstructor extends Constructor {
+    ContainerDataConstructor() {
       //Adding our own specific constructors for tags.
-      this.yamlConstructors.put(new Tag("KeyValueContainerData"),
-          new ConstructKeyValueContainerData());
+      // When a new Container type is added, we need to add yamlConstructor
+      // for that
+      this.yamlConstructors.put(YAML_TAG, new ConstructKeyValueContainerData());
       this.yamlConstructors.put(Tag.INT, new ConstructLong());
     }

@@ -167,21 +178,14 @@ public final class KeyValueYaml {
       public Object construct(Node node) {
         MappingNode mnode = (MappingNode) node;
         Map<Object, Object> nodes = constructMapping(mnode);
-        String type = (String) nodes.get("containerType");
-
-        ContainerProtos.ContainerType containerType = ContainerProtos
-            .ContainerType.KeyValueContainer;
-        if (type.equals("KeyValueContainer")) {
-          containerType = ContainerProtos.ContainerType.KeyValueContainer;
-        }

         //Needed this, as TAG.INT type is by default converted to Long.
         long layOutVersion = (long) nodes.get("layOutVersion");
         int lv = (int) layOutVersion;

         //When a new field is added, it needs to be added here.
-        KeyValueContainerData kvData = new KeyValueContainerData(containerType,
-            (long) nodes.get("containerId"), lv);
+        KeyValueContainerData kvData = new KeyValueContainerData((long) nodes
+            .get("containerId"), lv);
         kvData.setContainerDBType((String)nodes.get("containerDBType"));
         kvData.setMetadataPath((String) nodes.get(
             "metadataPath"));

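Usage sketch (not part of the commit; file paths and ids are illustrative) showing how the renamed ContainerDataYaml API is driven after this change:

    import java.io.File;
    import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
    import org.apache.hadoop.ozone.container.common.impl.ContainerData;
    import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    public class ContainerDataYamlSketch {
      public static void main(String[] args) throws Exception {
        // Writers must now name the container type so the matching YAML
        // representer is picked; an unrecognized type fails with the new
        // UNKNOWN_CONTAINER_TYPE proto Result added above.
        KeyValueContainerData kvData = new KeyValueContainerData(1L);
        kvData.setMetadataPath("/tmp/hdds/1/metadata");
        File containerFile = new File("/tmp/hdds/1.container");
        ContainerDataYaml.createContainerFile(
            ContainerProtos.ContainerType.KeyValueContainer, containerFile,
            kvData);

        // Reading stays type-agnostic: the YAML tag stored in the file
        // decides which ContainerData subclass gets constructed.
        ContainerData loaded = ContainerDataYaml.readContainerFile(containerFile);
        System.out.println(loaded.getClass().getSimpleName());
      }
    }
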
+ 11 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/Dispatcher.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.ozone.container.common.impl;

 import com.google.common.base.Preconditions;
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.ratis.shaded.com.google.protobuf.ByteString;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdds.scm.container.common.helpers
@@ -90,6 +91,16 @@ public class Dispatcher implements ContainerDispatcher {
   public void shutdown() {
   }

+  @Override
+  public Handler getHandler(ContainerProtos.ContainerType containerType) {
+    return null;
+  }
+
+  @Override
+  public void setScmId(String scmId) {
+    // DO nothing, this will be removed when cleanup.
+  }
+
   @Override
   public ContainerCommandResponseProto dispatch(
       ContainerCommandRequestProto msg) {

+ 19 - 9
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java

@@ -52,24 +52,23 @@ public class HddsDispatcher implements ContainerDispatcher {
   private final Configuration conf;
   private final ContainerSet containerSet;
   private final VolumeSet volumeSet;
-  private final String scmID;
+  private String scmID;

   /**
    * Constructs an OzoneContainer that receives calls from
    * XceiverServerHandler.
    */
   public HddsDispatcher(Configuration config, ContainerSet contSet,
-      VolumeSet volumes, String scmId) {
-    // TODO: Pass ContainerSet, VolumeSet and scmID, intialize metrics
+      VolumeSet volumes) {
+    //TODO: initialize metrics
     this.conf = config;
     this.containerSet = contSet;
     this.volumeSet = volumes;
-    this.scmID = scmId;
     this.handlers = Maps.newHashMap();
     for (ContainerType containerType : ContainerType.values()) {
       handlers.put(containerType,
           Handler.getHandlerForContainerType(
-              containerType, conf, containerSet, volumeSet, scmID));
+              containerType, conf, containerSet, volumeSet));
     }
   }

@@ -103,7 +102,7 @@ public class HddsDispatcher implements ContainerDispatcher {
       return ContainerUtils.logAndReturnError(LOG, ex, msg);
     }

-    Handler handler = getHandlerForContainerType(containerType);
+    Handler handler = getHandler(containerType);
     if (handler == null) {
       StorageContainerException ex = new StorageContainerException("Invalid " +
           "ContainerType " + containerType,
@@ -113,9 +112,20 @@ public class HddsDispatcher implements ContainerDispatcher {
     return handler.handle(msg, container);
   }

-  @VisibleForTesting
-  public Handler getHandlerForContainerType(ContainerType type) {
-    return handlers.get(type);
+  @Override
+  public Handler getHandler(ContainerProtos.ContainerType containerType) {
+    return handlers.get(containerType);
+  }
+
+  @Override
+  public void setScmId(String scmId) {
+    Preconditions.checkNotNull(scmId, "scmId Cannot be null");
+    if (this.scmID == null) {
+      this.scmID = scmId;
+      for (Map.Entry<ContainerType, Handler> handlerMap : handlers.entrySet()) {
+        handlerMap.getValue().setScmID(scmID);
+      }
+    }
   }

   private long getContainerID(ContainerCommandRequestProto request)
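Wiring sketch (not part of the commit): the dispatcher is now constructed without an scmId, which is injected after the SCM version handshake. Only HddsDispatcher's signatures come from this diff; the helper shape is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
    import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;

    public class DispatcherWiringSketch {
      static HddsDispatcher wire(Configuration conf, ContainerSet containerSet,
          VolumeSet volumeSet, String scmId) {
        // scmID is no longer a constructor argument; handlers are registered
        // for every ContainerType without it.
        HddsDispatcher dispatcher =
            new HddsDispatcher(conf, containerSet, volumeSet);
        // Later (VersionEndpointTask does this) the id is pushed in once;
        // the first call wins and repeated calls are no-ops.
        dispatcher.setScmId(scmId);
        return dispatcher;
      }
    }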

+ 14 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerDispatcher.java

@@ -18,6 +18,7 @@

 package org.apache.hadoop.ozone.container.common.interfaces;

+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -48,4 +49,17 @@ public interface ContainerDispatcher {
    * Shutdown Dispatcher services.
    */
   void shutdown();
+
+  /**
+   * Returns the handler for the specified containerType.
+   * @param containerType
+   * @return
+   */
+  Handler getHandler(ContainerProtos.ContainerType containerType);
+
+  /**
+   * If scmId is not set, this will set scmId, otherwise it is a no-op.
+   * @param scmId
+   */
+  void setScmId(String scmId);
 }

+ 8 - 7
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/Handler.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;

-import java.io.IOException;

 /**
  * Dispatcher sends ContainerCommandRequests to Handler. Each Container Type
@@ -42,22 +41,20 @@ public class Handler {
   protected final Configuration conf;
   protected final ContainerSet containerSet;
   protected final VolumeSet volumeSet;
-  protected final String scmID;
+  protected String scmID;

   protected Handler(Configuration config, ContainerSet contSet,
-      VolumeSet volumeSet, String scmID) {
+      VolumeSet volumeSet) {
     conf = config;
     containerSet = contSet;
     this.volumeSet = volumeSet;
-    this.scmID = scmID;
   }

   public static Handler getHandlerForContainerType(ContainerType containerType,
-      Configuration config, ContainerSet contSet, VolumeSet volumeSet,
-      String scmID) {
+      Configuration config, ContainerSet contSet, VolumeSet volumeSet) {
     switch (containerType) {
     case KeyValueContainer:
-      return KeyValueHandler.getInstance(config, contSet, volumeSet, scmID);
+      return KeyValueHandler.getInstance(config, contSet, volumeSet);
     default:
       throw new IllegalArgumentException("Handler for ContainerType: " +
         containerType + "doesn't exist.");
@@ -68,4 +65,8 @@ public class Handler {
       ContainerCommandRequestProto msg, Container container) {
     return null;
   }
+
+  public void setScmID(String scmId) {
+    this.scmID = scmId;
+  }
 }

+ 2 - 2
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/DatanodeStateMachine.java

@@ -93,8 +93,8 @@ public class DatanodeStateMachine implements Closeable {
      // trick.
     commandDispatcher = CommandDispatcher.newBuilder()
         .addHandler(new CloseContainerCommandHandler())
-        .addHandler(new DeleteBlocksCommandHandler(
-            container.getContainerManager(), conf))
+        .addHandler(new DeleteBlocksCommandHandler(container.getContainerSet(),
+            conf))
         .setConnectionManager(connectionManager)
         .setContainer(container)
         .setContext(context)

+ 1 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/background/BlockDeletingService.java

@@ -97,6 +97,7 @@ public class BlockDeletingService extends BackgroundService{
         OZONE_BLOCK_DELETING_CONTAINER_LIMIT_PER_INTERVAL_DEFAULT);
   }

+
   @Override
   public BackgroundTaskQueue getTasks() {
     BackgroundTaskQueue queue = new BackgroundTaskQueue();

+ 39 - 17
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/DeleteBlocksCommandHandler.java

@@ -18,8 +18,10 @@ package org.apache.hadoop.ozone.container.common.statemachine.commandhandler;

 import com.google.common.primitives.Longs;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMCommandProto;
+import org.apache.hadoop.hdds.scm.container.common.helpers.StorageContainerException;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerBlocksDeletionACKProto;
@@ -29,11 +31,13 @@ import org.apache.hadoop.hdds.protocol.proto
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.DeletedBlocksTransaction;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers
     .DeletedContainerBlocksSummary;
-import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
+import org.apache.hadoop.ozone.container.common.interfaces.Container;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
 import org.apache.hadoop.ozone.container.common.statemachine
@@ -51,6 +55,8 @@ import org.slf4j.LoggerFactory;
 import java.io.IOException;
 import java.util.List;

+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.CONTAINER_NOT_FOUND;
+
 /**
  * Handle block deletion commands.
  */
@@ -59,14 +65,14 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
   private static final Logger LOG =
       LoggerFactory.getLogger(DeleteBlocksCommandHandler.class);

-  private ContainerManager containerManager;
-  private Configuration conf;
+  private final ContainerSet containerSet;
+  private final Configuration conf;
   private int invocationCount;
   private long totalTime;

-  public DeleteBlocksCommandHandler(ContainerManager containerManager,
+  public DeleteBlocksCommandHandler(ContainerSet cset,
       Configuration conf) {
-    this.containerManager = containerManager;
+    this.containerSet = cset;
     this.conf = conf;
   }

@@ -105,8 +111,24 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
           DeleteBlockTransactionResult.newBuilder();
       txResultBuilder.setTxID(entry.getTxID());
       try {
-        deleteContainerBlocks(entry, conf);
-        txResultBuilder.setSuccess(true);
+        long containerId = entry.getContainerID();
+        Container cont = containerSet.getContainer(containerId);
+        if(cont == null) {
+          throw new StorageContainerException("Unable to find the container "
+              + containerId, CONTAINER_NOT_FOUND);
+        }
+        ContainerProtos.ContainerType containerType = cont.getContainerType();
+        switch (containerType) {
+        case KeyValueContainer:
+          KeyValueContainerData containerData = (KeyValueContainerData)
+              cont.getContainerData();
+          deleteKeyValueContainerBlocks(containerData, entry);
+          txResultBuilder.setSuccess(true);
+          break;
+        default:
+          LOG.error("Delete Blocks Command Handler is not implemented for " +
+              "containerType {}", containerType);
+        }
       } catch (IOException e) {
         LOG.warn("Failed to delete blocks for container={}, TXID={}",
             entry.getContainerID(), entry.getTxID(), e);
@@ -145,21 +167,21 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
    * Move a bunch of blocks from a container to deleting state.
    * This is a meta update, the actual deletes happen in async mode.
    *
+   * @param containerData - KeyValueContainerData
    * @param delTX a block deletion transaction.
-   * @param config configuration.
    * @throws IOException if I/O error occurs.
    */
-  private void deleteContainerBlocks(DeletedBlocksTransaction delTX,
-      Configuration config) throws IOException {
+  private void deleteKeyValueContainerBlocks(
+      KeyValueContainerData containerData, DeletedBlocksTransaction delTX)
+      throws IOException {
     long containerId = delTX.getContainerID();
-    ContainerData containerInfo = containerManager.readContainer(containerId);
     if (LOG.isDebugEnabled()) {
       LOG.debug("Processing Container : {}, DB path : {}", containerId,
-          containerInfo.getDBPath());
+          containerData.getMetadataPath());
     }

     int newDeletionBlocks = 0;
-    MetadataStore containerDB = KeyUtils.getDB(containerInfo, config);
+    MetadataStore containerDB = KeyUtils.getDB(containerData, conf);
     for (Long blk : delTX.getLocalIDList()) {
       BatchOperation batch = new BatchOperation();
       byte[] blkBytes = Longs.toByteArray(blk);
@@ -187,12 +209,12 @@ public class DeleteBlocksCommandHandler implements CommandHandler {
                 + " container {}, skip deleting it.", blk, containerId);
       }
       containerDB.put(DFSUtil.string2Bytes(
-          OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + delTX.getContainerID()),
+          OzoneConsts.DELETE_TRANSACTION_KEY_PREFIX + containerId),
           Longs.toByteArray(delTX.getTxID()));
     }

     // update pending deletion blocks count in in-memory container status
-    containerManager.incrPendingDeletionBlocks(newDeletionBlocks, containerId);
+    containerData.incrPendingDeletionBlocks(newDeletionBlocks);
   }

   @Override

+ 2 - 1
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/datanode/RunningDatanodeState.java

@@ -95,7 +95,8 @@ public class RunningDatanodeState implements DatanodeState {
       getEndPointTask(EndpointStateMachine endpoint) {
     switch (endpoint.getState()) {
     case GETVERSION:
-      return new VersionEndpointTask(endpoint, conf);
+      return new VersionEndpointTask(endpoint, conf, context.getParent()
+          .getContainer());
     case REGISTER:
       return  RegisterEndpointTask.newBuilder()
           .setConfig(conf)

+ 32 - 2
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/states/endpoint/VersionEndpointTask.java

@@ -16,14 +16,22 @@
  */
 package org.apache.hadoop.ozone.container.common.states.endpoint;

+import com.google.common.base.Preconditions;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionResponseProto;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.statemachine
     .EndpointStateMachine;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.protocol.VersionResponse;

 import java.io.IOException;
+import java.util.List;
+import java.util.Map;
 import java.util.concurrent.Callable;

 /**
@@ -33,11 +41,13 @@ public class VersionEndpointTask implements
     Callable<EndpointStateMachine.EndPointStates> {
   private final EndpointStateMachine rpcEndPoint;
   private final Configuration configuration;
+  private final OzoneContainer ozoneContainer;

   public VersionEndpointTask(EndpointStateMachine rpcEndPoint,
-      Configuration conf) {
+                             Configuration conf, OzoneContainer container) {
     this.rpcEndPoint = rpcEndPoint;
     this.configuration = conf;
+    this.ozoneContainer = container;
   }

   /**
@@ -52,7 +62,27 @@ public class VersionEndpointTask implements
     try{
       SCMVersionResponseProto versionResponse =
           rpcEndPoint.getEndPoint().getVersion(null);
-      rpcEndPoint.setVersion(VersionResponse.getFromProtobuf(versionResponse));
+      VersionResponse response = VersionResponse.getFromProtobuf(
+          versionResponse);
+      rpcEndPoint.setVersion(response);
+      VolumeSet volumeSet = ozoneContainer.getVolumeSet();
+      Map<String, HddsVolume> volumeMap = volumeSet.getVolumeMap();
+      List<HddsProtos.KeyValue> keyValues =  versionResponse.getKeysList();
+
+      String scmId = response.getValue(OzoneConsts.SCM_ID);
+      String clusterId = response.getValue(OzoneConsts.CLUSTER_ID);
+
+      Preconditions.checkNotNull(scmId, "Reply from SCM: scmId cannot be " +
+          "null");
+      Preconditions.checkNotNull(scmId, "Reply from SCM: clusterId cannot be" +
+          " null");
+
+      // If version file does not exist create version file and also set scmId
+      for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
+        HddsVolume hddsVolume = entry.getValue();
+        hddsVolume.format(clusterId);
+        ozoneContainer.getDispatcher().setScmId(scmId);
+      }

       EndpointStateMachine.EndPointStates nextState =
           rpcEndPoint.getState().getNextState();

+ 6 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/HddsVolume.java

@@ -130,6 +130,10 @@ public final class HddsVolume {
     initialize();
   }

+  public VolumeInfo getVolumeInfo() {
+    return volumeInfo;
+  }
+
   /**
    * Initializes the volume.
    * Creates the Version file if not present,
@@ -327,4 +331,6 @@ public final class HddsVolume {
   public void setScmUsageForTesting(GetSpaceUsed scmUsageForTest) {
     volumeInfo.setScmUsageForTesting(scmUsageForTest);
   }
+
+
 }

+ 48 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/volume/VolumeSet.java

@@ -27,8 +27,13 @@ import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.protocol.proto
+    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.common.InconsistentStorageStateException;
+import org.apache.hadoop.ozone.container.common.impl.StorageLocationReport;
 import org.apache.hadoop.ozone.container.common.utils.HddsVolumeUtil;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume.VolumeState;
 import org.apache.hadoop.ozone.container.common.interfaces.VolumeChoosingPolicy;
@@ -309,4 +314,47 @@ public class VolumeSet {
   public Map<StorageType, List<HddsVolume>> getVolumeStateMap() {
     return ImmutableMap.copyOf(volumeStateMap);
   }
+
+  public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
+      throws IOException {
+    boolean failed;
+    StorageLocationReport[] reports =
+        new StorageLocationReport[volumeMap.size()];
+    int counter = 0;
+    for (Map.Entry<String, HddsVolume> entry : volumeMap.entrySet()) {
+      HddsVolume hddsVolume = entry.getValue();
+      VolumeInfo volumeInfo = hddsVolume.getVolumeInfo();
+      long scmUsed = 0;
+      long remaining = 0;
+      failed = false;
+      try {
+        scmUsed = volumeInfo.getScmUsed();
+        remaining = volumeInfo.getAvailable();
+      } catch (IOException ex) {
+        LOG.warn("Failed to get scmUsed and remaining for container " +
+            "storage location {}", volumeInfo.getRootDir());
+        // reset scmUsed and remaining if df/du failed.
+        scmUsed = 0;
+        remaining = 0;
+        failed = true;
+      }
+
+      StorageLocationReport.Builder builder =
+          StorageLocationReport.newBuilder();
+      builder.setStorageLocation(volumeInfo.getRootDir())
+          .setId(hddsVolume.getStorageID())
+          .setFailed(failed)
+          .setCapacity(hddsVolume.getCapacity())
+          .setRemaining(remaining)
+          .setScmUsed(scmUsed)
+          .setStorageType(hddsVolume.getStorageType());
+      StorageLocationReport r = builder.build();
+      reports[counter++] = r;
+    }
+    NodeReportProto.Builder nrb = NodeReportProto.newBuilder();
+    for (int i = 0; i < reports.length; i++) {
+      nrb.addStorageReport(reports[i].getProtoBufMessage());
+    }
+    return nrb.build();
+  }
 }
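Consumption sketch (not part of the commit). getStorageReportList() is the standard protobuf accessor generated for the repeated field that addStorageReport() fills above; it and the StorageReportProto getters are assumptions, not shown in this diff:

    import java.io.IOException;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.NodeReportProto;
    import org.apache.hadoop.hdds.protocol.proto
        .StorageContainerDatanodeProtocolProtos.StorageReportProto;
    import org.apache.hadoop.ozone.container.common.volume.VolumeSet;

    public class NodeReportSketch {
      static void printReport(VolumeSet volumeSet) throws IOException {
        NodeReportProto report = volumeSet.getNodeReport();
        for (StorageReportProto storage : report.getStorageReportList()) {
          // One report per HddsVolume; failed volumes carry zeroed usage,
          // matching the catch block in getNodeReport() above.
          System.out.println(storage.getStorageLocation() + " capacity="
              + storage.getCapacity() + " scmUsed=" + storage.getScmUsed()
              + " failed=" + storage.getFailed());
        }
      }
    }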

+ 5 - 47
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainer.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.ozone.container.keyvalue;

 import com.google.common.base.Preconditions;
-import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileUtil;
@@ -33,6 +32,7 @@ import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
@@ -47,21 +47,16 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.File;
-import java.io.FileInputStream;
 import java.io.FileOutputStream;
 import java.io.IOException;
 import java.io.OutputStreamWriter;
 import java.io.Writer;

-import java.security.MessageDigest;
-import java.security.NoSuchAlgorithmException;
 import java.util.Map;
 import java.util.concurrent.locks.ReentrantReadWriteLock;

 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.CONTAINER_ALREADY_EXISTS;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.CONTAINER_CHECKSUM_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.CONTAINER_METADATA_ERROR;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
@@ -74,8 +69,6 @@ import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.ERROR_IN_COMPACT_DB;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.INVALID_CONTAINER_STATE;
-import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
-    .Result.NO_SUCH_ALGORITHM;
 import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .Result.UNSUPPORTED_REQUEST;

@@ -198,10 +191,12 @@ public class KeyValueContainer implements Container {
     try {
       tempContainerFile = createTempFile(containerFile);
       tempCheckSumFile = createTempFile(containerCheckSumFile);
-      KeyValueYaml.createContainerFile(tempContainerFile, containerData);
+      ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
+              .KeyValueContainer, tempContainerFile, containerData);

       //Compute Checksum for container file
-      String checksum = computeCheckSum(tempContainerFile);
+      String checksum = KeyValueContainerUtil.computeCheckSum(containerId,
+          tempContainerFile);
       containerCheckSumStream = new FileOutputStream(tempCheckSumFile);
       writer = new OutputStreamWriter(containerCheckSumStream, "UTF-8");
       writer.write(checksum);
@@ -308,43 +303,6 @@
   }


-  /**
-   * Compute checksum of the .container file.
-   * @param containerFile
-   * @throws StorageContainerException
-   */
-  private String computeCheckSum(File containerFile) throws
-      StorageContainerException {
-
-    MessageDigest sha;
-    FileInputStream containerFileStream = null;
-    try {
-      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
-    } catch (NoSuchAlgorithmException e) {
-      throw new StorageContainerException("Unable to create Message Digest,"
-          + " usually this is a java configuration issue.",
-          NO_SUCH_ALGORITHM);
-    }
-
-    try {
-      containerFileStream = new FileInputStream(containerFile);
-      byte[] byteArray = new byte[1024];
-      int bytesCount = 0;
-
-      while ((bytesCount = containerFileStream.read(byteArray)) != -1) {
-        sha.update(byteArray, 0, bytesCount);
-      }
-      String checksum = DigestUtils.sha256Hex(sha.digest());
-      return checksum;
-    } catch (IOException ex) {
-      throw new StorageContainerException("Error during update of " +
-          "check sum file. Container Name: " + containerData.getContainerId(),
-          ex, CONTAINER_CHECKSUM_ERROR);
-    } finally {
-      IOUtils.closeStream(containerFileStream);
-    }
-  }
-
   @Override
   public void delete(boolean forceDelete)
       throws StorageContainerException {
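Sketch of the relocated helper (not part of the commit): computeCheckSum now lives in KeyValueContainerUtil and takes the container id, matching the call site above. Presumably the id replaces the removed method's use of containerData.getContainerId() in error reporting:

    import java.io.File;
    import org.apache.hadoop.ozone.container.keyvalue.helpers
        .KeyValueContainerUtil;

    public class ChecksumSketch {
      static String checksumOf(long containerId, File containerFile)
          throws Exception {
        // Same role as the removed private computeCheckSum, now shared.
        return KeyValueContainerUtil.computeCheckSum(containerId,
            containerFile);
      }
    }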

+ 15 - 7
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueContainerData.java

@@ -18,12 +18,14 @@

 package org.apache.hadoop.ozone.container.keyvalue;

+import com.google.common.collect.Lists;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.yaml.snakeyaml.nodes.Tag;


 import java.io.File;
-import java.io.IOException;
+import java.util.List;
 import java.util.Map;

 /**
@@ -33,6 +35,14 @@ import java.util.Map;
  */
 public class KeyValueContainerData extends ContainerData {

+  // Yaml Tag used for KeyValueContainerData.
+  public static final Tag YAML_TAG = new Tag("KeyValueContainerData");
+
+  // Fields need to be stored in .container file.
+  public static final List<String> YAML_FIELDS = Lists.newArrayList(
+      "containerType", "containerId", "layOutVersion", "state", "metadata",
+      "metadataPath", "chunksPath", "containerDBType");
+
   // Path to Container metadata Level DB/RocksDB Store and .container file.
   private String metadataPath;

@@ -49,23 +59,21 @@ public class KeyValueContainerData extends ContainerData {

   /**
    * Constructs KeyValueContainerData object.
-   * @param type - containerType
    * @param id - ContainerId
    */
-  public KeyValueContainerData(ContainerProtos.ContainerType type, long id) {
-    super(type, id);
+  public KeyValueContainerData(long id) {
+    super(ContainerProtos.ContainerType.KeyValueContainer, id);
     this.numPendingDeletionBlocks = 0;
   }

   /**
    * Constructs KeyValueContainerData object.
-   * @param type - containerType
    * @param id - ContainerId
    * @param layOutVersion
    */
-  public KeyValueContainerData(ContainerProtos.ContainerType type, long id,
+  public KeyValueContainerData(long id,
                                int layOutVersion) {
-    super(type, id, layOutVersion);
+    super(ContainerProtos.ContainerType.KeyValueContainer, id, layOutVersion);
     this.numPendingDeletionBlocks = 0;
   }

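Before/after sketch of the constructor change (not part of the commit; the container ids are illustrative):

    import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;

    public class ContainerDataConstructorSketch {
      public static void main(String[] args) {
        // Before: new KeyValueContainerData(ContainerType.KeyValueContainer, 1L)
        // After: the subclass fixes the type itself, so callers can no
        // longer mislabel a KeyValue container.
        KeyValueContainerData data = new KeyValueContainerData(1L);
        KeyValueContainerData withLayout = new KeyValueContainerData(2L, 1);
      }
    }
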
+ 16 - 6
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/KeyValueHandler.java

@@ -18,6 +18,7 @@

 package org.apache.hadoop.ozone.container.keyvalue;

+import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.ByteString;
 import com.sun.jersey.spi.resource.Singleton;
@@ -93,16 +94,16 @@ public class KeyValueHandler extends Handler {
   // TODO : Add metrics and populate it.

   public static KeyValueHandler getInstance(Configuration config,
-      ContainerSet contSet, VolumeSet volSet, String scmID) {
+      ContainerSet contSet, VolumeSet volSet) {
     if (INSTANCE == null) {
-      INSTANCE = new KeyValueHandler(config, contSet, volSet, scmID);
+      INSTANCE = new KeyValueHandler(config, contSet, volSet);
     }
     return INSTANCE;
   }

   private KeyValueHandler(Configuration config, ContainerSet contSet,
-      VolumeSet volSet, String scmID) {
-    super(config, contSet, volSet, scmID);
+      VolumeSet volSet) {
+    super(config, contSet, volSet);
     containerType = ContainerType.KeyValueContainer;
     keyManager = new KeyManagerImpl(config);
     chunkManager = new ChunkManagerImpl();
@@ -156,6 +157,16 @@ public class KeyValueHandler extends Handler {
     return null;
   }

+  @VisibleForTesting
+  public ChunkManager getChunkManager() {
+    return this.chunkManager;
+  }
+
+  @VisibleForTesting
+  public KeyManager getKeyManager() {
+    return this.keyManager;
+  }
+
   /**
    * Handles Create Container Request. If successful, adds the container to
    * ContainerSet.
@@ -180,7 +191,7 @@ public class KeyValueHandler extends Handler {
     }

     KeyValueContainerData newContainerData = new KeyValueContainerData(
-        containerType, containerID);
+        containerID);
     // TODO: Add support to add metadataList to ContainerData. Add metadata
     // to container during creation.
     KeyValueContainer newContainer = new KeyValueContainer(
@@ -262,7 +273,6 @@ public class KeyValueHandler extends Handler {

     boolean forceDelete = request.getDeleteContainer().getForceDelete();
     kvContainer.writeLock();
-
     try {
       // Check if container is open
       if (kvContainer.getContainerData().isOpen()) {
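
Since scmID no longer flows through the handler, tests can build the handler first and inject the SCM identity later through HddsDispatcher#setScmId (see TestKeyValueHandler below). A rough wiring sketch, assuming conf, containerSet and volumeSet are already constructed:

    // Sketch only: getInstance caches a singleton, so real code must call
    // it with consistent arguments.
    KeyValueHandler handler =
        KeyValueHandler.getInstance(conf, containerSet, volumeSet);
    // The new @VisibleForTesting accessors expose the managers to tests:
    ChunkManager chunkManager = handler.getChunkManager();
    KeyManager keyManager = handler.getKeyManager();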

+ 134 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/keyvalue/helpers/KeyValueContainerUtil.java

@@ -18,9 +18,11 @@
 package org.apache.hadoop.ozone.container.keyvalue.helpers;

 import com.google.common.base.Preconditions;
+import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandRequestProto;
@@ -28,15 +30,29 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos
     .ContainerCommandResponseProto;
 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.helpers.KeyData;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.utils.MetadataKeyFilters;
 import org.apache.hadoop.utils.MetadataStore;
 import org.apache.hadoop.utils.MetadataStoreBuilder;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

 import java.io.File;
+import java.io.FileInputStream;
 import java.io.IOException;
+import java.nio.file.Files;
+import java.nio.file.Path;
+import java.nio.file.Paths;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
+import java.util.List;
+import java.util.Map;
+
+import static org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos.Result.*;

 /**
  * Class which defines utility methods for KeyValueContainer.
@@ -170,4 +186,122 @@ public final class KeyValueContainerUtil {
     builder.setReadContainer(response);
     return builder.build();
   }
+
+  /**
+   * Compute checksum of the .container file.
+   * @param containerId
+   * @param containerFile
+   * @throws StorageContainerException
+   */
+  public static String computeCheckSum(long containerId, File
+      containerFile) throws StorageContainerException {
+    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
+    MessageDigest sha;
+    FileInputStream containerFileStream = null;
+    try {
+      sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
+    } catch (NoSuchAlgorithmException e) {
+      throw new StorageContainerException("Unable to create Message Digest, " +
+          "usually this is a java configuration issue.", NO_SUCH_ALGORITHM);
+    }
+    try {
+      containerFileStream = new FileInputStream(containerFile);
+      byte[] byteArray = new byte[1024];
+      int bytesCount = 0;
+      while ((bytesCount = containerFileStream.read(byteArray)) != -1) {
+        sha.update(byteArray, 0, bytesCount);
+      }
+      String checksum = DigestUtils.sha256Hex(sha.digest());
+      return checksum;
+    } catch (IOException ex) {
+      throw new StorageContainerException("Error during computing checksum: " +
+          "for container " + containerId, ex, CONTAINER_CHECKSUM_ERROR);
+    } finally {
+      IOUtils.closeStream(containerFileStream);
+    }
+  }
+
+  /**
+   * Verify checksum of the container.
+   * @param containerId
+   * @param checksumFile
+   * @param checksum
+   * @throws StorageContainerException
+   */
+  public static void verifyCheckSum(long containerId, File checksumFile,
+                                    String checksum)
+      throws StorageContainerException {
+    try {
+      Preconditions.checkNotNull(checksum);
+      Preconditions.checkNotNull(checksumFile);
+      Path path = Paths.get(checksumFile.getAbsolutePath());
+      List<String> fileCheckSum = Files.readAllLines(path);
+      Preconditions.checkState(fileCheckSum.size() == 1, "checksum " +
+          "should be 32 byte string");
+      if (!checksum.equals(fileCheckSum.get(0))) {
+        LOG.error("Checksum mismatch for the container {}", containerId);
+        throw new StorageContainerException("Checksum mismatch for " +
+            "the container " + containerId, CHECKSUM_MISMATCH);
+      }
+    } catch (StorageContainerException ex) {
+      throw ex;
+    } catch (IOException ex) {
+      LOG.error("Error during verify checksum for container {}", containerId);
+      throw new StorageContainerException("Error during verify checksum" +
+          " for container " + containerId, IO_EXCEPTION);
+    }
+  }
+
+  /**
+   * Parse KeyValueContainerData and verify checksum.
+   * @param containerData
+   * @param containerFile
+   * @param checksumFile
+   * @param dbFile
+   * @param config
+   * @throws IOException
+   */
+  public static void parseKeyValueContainerData(
+      KeyValueContainerData containerData, File containerFile, File
+      checksumFile, File dbFile, OzoneConfiguration config) throws IOException {
+
+    Preconditions.checkNotNull(containerData, "containerData cannot be null");
+    Preconditions.checkNotNull(containerFile, "containerFile cannot be null");
+    Preconditions.checkNotNull(checksumFile, "checksumFile cannot be null");
+    Preconditions.checkNotNull(dbFile, "dbFile cannot be null");
+    Preconditions.checkNotNull(config, "ozone config cannot be null");
+
+    long containerId = containerData.getContainerId();
+    String containerName = String.valueOf(containerId);
+    File metadataPath = new File(containerData.getMetadataPath());
+
+    Preconditions.checkNotNull(containerName, "container Name cannot be " +
+        "null");
+    Preconditions.checkNotNull(metadataPath, "metadata path cannot be " +
+        "null");
+
+    // Verify Checksum
+    String checksum = KeyValueContainerUtil.computeCheckSum(
+        containerData.getContainerId(), containerFile);
+    KeyValueContainerUtil.verifyCheckSum(containerId, checksumFile, checksum);
+
+    containerData.setDbFile(dbFile);
+
+    MetadataStore metadata = KeyUtils.getDB(containerData, config);
+    long bytesUsed = 0;
+    List<Map.Entry<byte[], byte[]>> liveKeys = metadata
+        .getRangeKVs(null, Integer.MAX_VALUE,
+            MetadataKeyFilters.getNormalKeyFilter());
+    bytesUsed = liveKeys.parallelStream().mapToLong(e-> {
+      KeyData keyData;
+      try {
+        keyData = KeyUtils.getKeyData(e.getValue());
+        return keyData.getSize();
+      } catch (IOException ex) {
+        return 0L;
+      }
+    }).sum();
+    containerData.setBytesUsed(bytesUsed);
+  }
+
 }
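
Together, computeCheckSum and verifyCheckSum let the startup read path reject a tampered or truncated .container file before the container enters the ContainerSet. A sketch of that flow (file names are illustrative; production code resolves them through KeyValueContainerLocationUtil):

    File containerFile = new File(metadataDir, containerId + ".container");
    File checksumFile = new File(metadataDir, containerId + ".checksum");
    // Recompute the SHA-256-based checksum of the .container file...
    String actual =
        KeyValueContainerUtil.computeCheckSum(containerId, containerFile);
    // ...and compare it with the persisted value; a mismatch surfaces as a
    // StorageContainerException with result CHECKSUM_MISMATCH.
    KeyValueContainerUtil.verifyCheckSum(containerId, checksumFile, actual);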

+ 157 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/ContainerReader.java

@@ -0,0 +1,157 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+import com.google.common.base.Preconditions;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.common.Storage;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerLocationUtil;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyValueContainerUtil;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.File;
+import java.io.FileFilter;
+import java.io.IOException;
+
+
+/**
+ * Class used to read .container files from Volume and build container map.
+ */
+public class ContainerReader implements Runnable {
+
+  private static final Logger LOG = LoggerFactory.getLogger(
+      ContainerReader.class);
+  private File hddsVolumeDir;
+  private final ContainerSet containerSet;
+  private final OzoneConfiguration config;
+
+  ContainerReader(File volumeRoot, ContainerSet cset, OzoneConfiguration conf) {
+    Preconditions.checkNotNull(volumeRoot);
+    this.hddsVolumeDir = volumeRoot;
+    this.containerSet = cset;
+    this.config = conf;
+  }
+
+  @Override
+  public void run() {
+    try {
+      readVolume(hddsVolumeDir);
+    } catch (RuntimeException ex) {
+      LOG.info("Caught an Run time exception during reading container files" +
+          " from Volume {}", hddsVolumeDir);
+    }
+  }
+
+  public void readVolume(File hddsVolumeRootDir) {
+    Preconditions.checkNotNull(hddsVolumeRootDir, "hddsVolumeRootDir " +
+        "cannot be null");
+
+
+    /**
+     *
+     * layout of the container directory on the disk.
+     * /hdds/<<scmUuid>>/current/<<containerdir>>/</containerID>/metadata
+     * /<<containerID>>.container
+     * /hdds/<<scmUuid>>/current/<<containerdir>>/<<containerID>>/metadata
+     * /<<containerID>>.checksum
+     * /hdds/<<scmUuid>>/current/<<containerdir>>/<<containerID>>/metadata
+     * /<<containerID>>.db
+     * /hdds/<<scmUuid>>/current/<<containerdir>>/<<containerID>>/chunks
+     * /<<chunkFile>>
+     *
+     **/
+
+    //filtering scm directory
+    File[] scmDir = hddsVolumeRootDir.listFiles(new FileFilter() {
+      @Override
+      public boolean accept(File pathname) {
+        return pathname.isDirectory();
+      }
+    });
+
+    for (File scmLoc : scmDir) {
+      File currentDir = null;
+      currentDir = new File(scmLoc, Storage.STORAGE_DIR_CURRENT);
+      File[] containerTopDirs = currentDir.listFiles();
+      if (containerTopDirs != null) {
+        for (File containerTopDir : containerTopDirs) {
+          if (containerTopDir.isDirectory()) {
+            File[] containerDirs = containerTopDir.listFiles();
+            for (File containerDir : containerDirs) {
+              File metadataPath = new File(containerDir + File.separator +
+                  OzoneConsts.CONTAINER_META_PATH);
+              String containerName = containerDir.getName();
+              if (metadataPath.exists()) {
+                File containerFile = KeyValueContainerLocationUtil
+                    .getContainerFile(metadataPath, containerName);
+                File checksumFile = KeyValueContainerLocationUtil
+                    .getContainerCheckSumFile(metadataPath, containerName);
+                File dbFile = KeyValueContainerLocationUtil
+                    .getContainerDBFile(metadataPath, containerName);
+                if (containerFile.exists() && checksumFile.exists() &&
+                    dbFile.exists()) {
+                  verifyContainerFile(containerFile, checksumFile, dbFile);
+                } else {
+                  LOG.error("Missing container metadata files for Container: " +
+                      "{}", containerName);
+                }
+              } else {
+                LOG.error("Missing container metadata directory for " +
+                    "Container: {}", containerName);
+              }
+            }
+          }
+        }
+      }
+    }
+  }
+
+  private void verifyContainerFile(File containerFile, File checksumFile,
+                                   File dbFile) {
+    try {
+      ContainerData containerData =  ContainerDataYaml.readContainerFile(
+          containerFile);
+
+      switch (containerData.getContainerType()) {
+      case KeyValueContainer:
+        KeyValueContainerData keyValueContainerData = (KeyValueContainerData)
+            containerData;
+        KeyValueContainerUtil.parseKeyValueContainerData(keyValueContainerData,
+            containerFile, checksumFile, dbFile, config);
+        KeyValueContainer keyValueContainer = new KeyValueContainer(
+            keyValueContainerData, config);
+        containerSet.addContainer(keyValueContainer);
+        break;
+      default:
+        LOG.error("Unrecognized ContainerType {} format during verify " +
+            "ContainerFile", containerData.getContainerType());
+      }
+    } catch (IOException ex) {
+      LOG.error("Error during reading container file {}", containerFile);
+    }
+  }
+
+}
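
OzoneContainer drives one ContainerReader per volume (see buildContainerSet in the next file); a condensed same-package sketch of that wiring, assuming a populated volumeSet and conf are in scope:

    ContainerSet containerSet = new ContainerSet();
    List<Thread> readers = new ArrayList<>();
    for (HddsVolume volume : volumeSet.getVolumesList()) {
      // One scanning thread per volume, all feeding the shared ContainerSet.
      Thread thread = new Thread(new ContainerReader(
          volume.getHddsRootDir(), containerSet, conf));
      thread.start();
      readers.add(thread);
    }
    for (Thread thread : readers) {
      thread.join();   // wait for every volume scan before serving traffic
    }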

+ 166 - 230
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java

@@ -1,72 +1,49 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one or more
- * contributor license agreements.  See the NOTICE file distributed with this
- * work for additional information regarding copyright ownership.  The ASF
- * licenses this file to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- * <p>
- * http://www.apache.org/licenses/LICENSE-2.0
- * <p>
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
- * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
- * License for the specific language governing permissions and limitations under
- * the License.
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
  */

 package org.apache.hadoop.ozone.container.ozoneimpl;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.ContainerReportsProto;
-import org.apache.hadoop.hdds.scm.ScmConfigKeys;
-import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.hdds.protocol.proto.HddsProtos;
-import org.apache.hadoop.hdds.protocol.proto
-    .StorageContainerDatanodeProtocolProtos.NodeReportProto;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.impl.ChunkManagerImpl;
-import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
-import org.apache.hadoop.ozone.container.common.impl.Dispatcher;
-import org.apache.hadoop.ozone.container.common.impl.KeyManagerImpl;
-import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
+import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.impl.HddsDispatcher;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerDispatcher;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
-import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
-import org.apache.hadoop.ozone.container.common.statemachine.background
-    .BlockDeletingService;
 import org.apache.hadoop.ozone.container.common.transport.server.XceiverServer;
-import org.apache.hadoop.ozone.container.common.transport.server
-    .XceiverServerGrpc;
-import org.apache.hadoop.ozone.container.common.transport.server
-    .XceiverServerSpi;
-import org.apache.hadoop.ozone.container.common.transport.server.ratis
-    .XceiverServerRatis;
+import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerGrpc;
+import org.apache.hadoop.ozone.container.common.transport.server.XceiverServerSpi;
+import org.apache.hadoop.ozone.container.common.transport.server.ratis.XceiverServerRatis;
+import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+
+import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;

-import java.io.IOException;
-import java.nio.file.Paths;
-import java.util.LinkedList;
-import java.util.List;
-import java.util.concurrent.TimeUnit;
+import java.io.*;
+import java.util.ArrayList;
+import java.util.Iterator;

-import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys
-    .OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_ROOT_PREFIX;
 import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;

 /**
@@ -74,226 +51,123 @@ import static org.apache.hadoop.ozone.OzoneConsts.INVALID_PORT;
  * layer.
  */
 public class OzoneContainer {
-  public static final Logger LOG =
-      LoggerFactory.getLogger(OzoneContainer.class);

-  private final Configuration ozoneConfig;
-  private final ContainerDispatcher dispatcher;
-  private final ContainerManager manager;
+  public static final Logger LOG = LoggerFactory.getLogger(
+      OzoneContainer.class);
+
+  private final HddsDispatcher hddsDispatcher;
+  private final DatanodeDetails dnDetails;
+  private final OzoneConfiguration config;
+  private final VolumeSet volumeSet;
+  private final ContainerSet containerSet;
   private final XceiverServerSpi[] server;
-  private final ChunkManager chunkManager;
-  private final KeyManager keyManager;
-  private final BlockDeletingService blockDeletingService;

   /**
-   * Creates a network endpoint and enables Ozone container.
-   *
-   * @param ozoneConfig - Config
+   * Construct OzoneContainer object.
+   * @param datanodeDetails
+   * @param conf
+   * @throws DiskOutOfSpaceException
    * @throws IOException
    */
-  public OzoneContainer(
-      DatanodeDetails datanodeDetails, Configuration ozoneConfig)
-      throws IOException {
-    this.ozoneConfig = ozoneConfig;
-    List<StorageLocation> locations = new LinkedList<>();
-    String[] paths = ozoneConfig.getStrings(
-        OzoneConfigKeys.OZONE_METADATA_DIRS);
-    if (paths != null && paths.length > 0) {
-      for (String p : paths) {
-        locations.add(StorageLocation.parse(
-            Paths.get(p).resolve(CONTAINER_ROOT_PREFIX).toString()));
-      }
-    } else {
-      getDataDir(locations);
-    }
-
-    manager = new ContainerManagerImpl();
-    manager.init(this.ozoneConfig, locations, datanodeDetails);
-    this.chunkManager = new ChunkManagerImpl(manager);
-    manager.setChunkManager(this.chunkManager);
-
-    this.keyManager = new KeyManagerImpl(manager, ozoneConfig);
-    manager.setKeyManager(this.keyManager);
-
-    long svcInterval =
-        ozoneConfig.getTimeDuration(OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
-    long serviceTimeout = ozoneConfig.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT, TimeUnit.MILLISECONDS);
-    this.blockDeletingService = new BlockDeletingService(manager,
-        svcInterval, serviceTimeout, ozoneConfig);
-
-    this.dispatcher = new Dispatcher(manager, this.ozoneConfig);
-
-    boolean useGrpc = this.ozoneConfig.getBoolean(
+  public OzoneContainer(DatanodeDetails datanodeDetails, OzoneConfiguration
+      conf) throws IOException {
+    this.dnDetails = datanodeDetails;
+    this.config = conf;
+    this.volumeSet = new VolumeSet(datanodeDetails, conf);
+    this.containerSet = new ContainerSet();
+    boolean useGrpc = this.config.getBoolean(
         ScmConfigKeys.DFS_CONTAINER_GRPC_ENABLED_KEY,
         ScmConfigKeys.DFS_CONTAINER_GRPC_ENABLED_DEFAULT);
+    buildContainerSet();
+    hddsDispatcher = new HddsDispatcher(config, containerSet, volumeSet);
     server = new XceiverServerSpi[]{
-        useGrpc ? new XceiverServerGrpc(datanodeDetails,
-            this.ozoneConfig, this.dispatcher) :
+        useGrpc ? new XceiverServerGrpc(datanodeDetails, this.config, this
+            .hddsDispatcher) :
             new XceiverServer(datanodeDetails,
-                this.ozoneConfig, this.dispatcher),
-      XceiverServerRatis
-          .newXceiverServerRatis(datanodeDetails, this.ozoneConfig, dispatcher)
+                this.config, this.hddsDispatcher),
+        XceiverServerRatis.newXceiverServerRatis(datanodeDetails, this
+            .config, hddsDispatcher)
     };
-  }

-  /**
-   * Starts serving requests to ozone container.
-   *
-   * @throws IOException
-   */
-  public void start() throws IOException {
-    for (XceiverServerSpi serverinstance : server) {
-      serverinstance.start();
-    }
-    blockDeletingService.start();
-    dispatcher.init();
+
   }

+
   /**
-   * Stops the ozone container.
-   * <p>
-   * Shutdown logic is not very obvious from the following code. if you need to
-   * modify the logic, please keep these comments in mind. Here is the shutdown
-   * sequence.
-   * <p>
-   * 1. We shutdown the network ports.
-   * <p>
-   * 2. Now we need to wait for all requests in-flight to finish.
-   * <p>
-   * 3. The container manager lock is a read-write lock with "Fairness"
-   * enabled.
-   * <p>
-   * 4. This means that the waiting threads are served in a "first-come-first
-   * -served" manner. Please note that this applies to waiting threads only.
-   * <p>
-   * 5. Since write locks are exclusive, if we are waiting to get a lock it
-   * implies that we are waiting for in-flight operations to complete.
-   * <p>
-   * 6. if there are other write operations waiting on the reader-writer lock,
-   * fairness guarantees that they will proceed before the shutdown lock
-   * request.
-   * <p>
-   * 7. Since all operations either take a reader or writer lock of container
-   * manager, we are guaranteed that we are the last operation since we have
-   * closed the network port, and we wait until close is successful.
-   * <p>
-   * 8. We take the writer lock and call shutdown on each of the managers in
-   * reverse order. That is chunkManager, keyManager and containerManager is
-   * shutdown.
+   * Builds the container map by scanning all volumes.
    */
-  public void stop() {
-    LOG.info("Attempting to stop container services.");
-    for(XceiverServerSpi serverinstance: server) {
-      serverinstance.stop();
+  public void buildContainerSet() {
+    Iterator<HddsVolume> volumeSetIterator = volumeSet.getVolumesList()
+        .iterator();
+    ArrayList<Thread> volumeThreads = new ArrayList<Thread>();
+
+    // TODO: Run the disk checker before this to see how the disks are,
+    // and add handling for disk failure tolerance.
+    while (volumeSetIterator.hasNext()) {
+      HddsVolume volume = volumeSetIterator.next();
+      File hddsVolumeRootDir = volume.getHddsRootDir();
+      Thread thread = new Thread(new ContainerReader(hddsVolumeRootDir,
+          containerSet, config));
+      thread.start();
+      volumeThreads.add(thread);
     }
-    dispatcher.shutdown();

     try {
-      this.manager.writeLock();
-      this.chunkManager.shutdown();
-      this.keyManager.shutdown();
-      this.manager.shutdown();
-      this.blockDeletingService.shutdown();
-      LOG.info("container services shutdown complete.");
-    } catch (IOException ex) {
-      LOG.warn("container service shutdown error:", ex);
-    } finally {
-      this.manager.writeUnlock();
+      for (int i = 0; i < volumeThreads.size(); i++) {
+        volumeThreads.get(i).join();
+      }
+    } catch (InterruptedException ex) {
+      LOG.info("Volume threads interrupted", ex);
     }
+
   }

   /**
-   * Returns a paths to data dirs.
+   * Starts serving requests to ozone container.
    *
-   * @param pathList - List of paths.
    * @throws IOException
    */
-  private void getDataDir(List<StorageLocation> pathList) throws IOException {
-    for (String dir : ozoneConfig.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
-      StorageLocation location = StorageLocation.parse(dir);
-      pathList.add(location);
+  public void start() throws IOException {
+    LOG.info("Attempting to start container services.");
+    for (XceiverServerSpi serverinstance : server) {
+      serverinstance.start();
     }
+    hddsDispatcher.init();
   }

   /**
-   * Returns node report of container storage usage.
+   * Stop Container Service on the datanode.
    */
-  public NodeReportProto getNodeReport() throws IOException {
-    return this.manager.getNodeReport();
-  }
-
-  private int getPortbyType(HddsProtos.ReplicationType replicationType) {
-    for (XceiverServerSpi serverinstance : server) {
-      if (serverinstance.getServerType() == replicationType) {
-        return serverinstance.getIPCPort();
-      }
+  public void stop() {
+    //TODO: at end of container IO integration work.
+    LOG.info("Attempting to stop container services.");
+    for(XceiverServerSpi serverinstance: server) {
+      serverinstance.stop();
     }
-    return INVALID_PORT;
+    hddsDispatcher.shutdown();
   }

-  /**
-   * Returns the container server IPC port.
-   *
-   * @return Container server IPC port.
-   */
-  public int getContainerServerPort() {
-    return getPortbyType(HddsProtos.ReplicationType.STAND_ALONE);
-  }

-  /**
-   * Returns the Ratis container Server IPC port.
-   *
-   * @return Ratis port.
-   */
-  public int getRatisContainerServerPort() {
-    return getPortbyType(HddsProtos.ReplicationType.RATIS);
+  @VisibleForTesting
+  public ContainerSet getContainerSet() {
+    return containerSet;
   }
-
   /**
    * Returns container report.
    * @return - container report.
    * @throws IOException
    */
-  public ContainerReportsProto getContainerReport() throws IOException {
-    return this.manager.getContainerReport();
+  public StorageContainerDatanodeProtocolProtos.ContainerReportsProto
+      getContainerReport() throws IOException {
+    return this.containerSet.getContainerReport();
   }

-// TODO: remove getContainerReports
   /**
-   * Returns the list of closed containers.
-   * @return - List of closed containers.
+   * Submit ContainerRequest.
+   * @param request
+   * @param replicationType
    * @throws IOException
    */
-  public List<ContainerData> getClosedContainerReports() throws IOException {
-    return this.manager.getClosedContainerReports();
-  }
-
-  private XceiverServerSpi getRatisSerer() {
-    for (XceiverServerSpi serverInstance : server) {
-      if (serverInstance instanceof XceiverServerRatis) {
-        return serverInstance;
-      }
-    }
-    return null;
-  }
-
-  private XceiverServerSpi getStandaAloneSerer() {
-    for (XceiverServerSpi serverInstance : server) {
-      if (!(serverInstance instanceof XceiverServerRatis)) {
-        return serverInstance;
-      }
-    }
-    return null;
-  }
-
-  @VisibleForTesting
-  public ContainerManager getContainerManager() {
-    return this.manager;
-  }
-
   public void submitContainerRequest(
       ContainerProtos.ContainerCommandRequestProto request,
       HddsProtos.ReplicationType replicationType) throws IOException {
@@ -332,4 +206,66 @@ public class OzoneContainer {
           + " not supported over HearBeat Response");
           + " not supported over HearBeat Response");
     }
     }
   }
   }
-}
+
+  private XceiverServerSpi getRatisSerer() {
+    for (XceiverServerSpi serverInstance : server) {
+      if (serverInstance instanceof XceiverServerRatis) {
+        return serverInstance;
+      }
+    }
+    return null;
+  }
+
+  private XceiverServerSpi getStandaAloneSerer() {
+    for (XceiverServerSpi serverInstance : server) {
+      if (!(serverInstance instanceof XceiverServerRatis)) {
+        return serverInstance;
+      }
+    }
+    return null;
+  }
+
+  private int getPortbyType(HddsProtos.ReplicationType replicationType) {
+    for (XceiverServerSpi serverinstance : server) {
+      if (serverinstance.getServerType() == replicationType) {
+        return serverinstance.getIPCPort();
+      }
+    }
+    return INVALID_PORT;
+  }
+
+  /**
+   * Returns the container server IPC port.
+   *
+   * @return Container server IPC port.
+   */
+  public int getContainerServerPort() {
+    return getPortbyType(HddsProtos.ReplicationType.STAND_ALONE);
+  }
+
+  /**
+   * Returns the Ratis container Server IPC port.
+   *
+   * @return Ratis port.
+   */
+  public int getRatisContainerServerPort() {
+    return getPortbyType(HddsProtos.ReplicationType.RATIS);
+  }
+
+  /**
+   * Returns node report of container storage usage.
+   */
+  public StorageContainerDatanodeProtocolProtos.NodeReportProto getNodeReport()
+      throws IOException {
+    return volumeSet.getNodeReport();
+  }
+
+  @VisibleForTesting
+  public ContainerDispatcher getDispatcher() {
+    return this.hddsDispatcher;
+  }
+
+  public VolumeSet getVolumeSet() {
+    return volumeSet;
+  }
+}
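
A minimal lifecycle sketch (the directory values are placeholders; the config keys mirror the test setup below, and datanodeDetails is assumed to be in scope):

    OzoneConfiguration conf = new OzoneConfiguration();
    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, "/tmp/hdds");        // placeholder
    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, "/tmp/hdds-meta");   // placeholder
    // The constructor scans every volume via buildContainerSet(), so the
    // container map is populated before any server starts.
    OzoneContainer container = new OzoneContainer(datanodeDetails, conf);
    container.start();               // XceiverServer instances + dispatcher
    container.getContainerReport();  // built from the in-memory ContainerSet
    container.stop();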

+ 4 - 0
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/protocol/VersionResponse.java

@@ -105,6 +105,10 @@ public class VersionResponse {
             .addAllKeys(list).build();
   }

+  public String getValue(String key) {
+    return this.values.get(key);
+  }
+
   /**
    * Builder class.
    */
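
A small sketch of the new accessor (the keys mirror the ones ScmTestMock advertises below; versionInfo and scmId are assumed to be in scope):

    VersionResponse response = VersionResponse.newBuilder()
        .setVersion(versionInfo.getVersion())
        .addValue(OzoneConsts.SCM_ID, scmId)
        .build();
    String advertisedScmId = response.getValue(OzoneConsts.SCM_ID);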

+ 11 - 2
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/SCMTestUtils.java

@@ -25,16 +25,20 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolService;
 import org.apache.hadoop.ipc.ProtobufRpcEngine;
 import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 import org.apache.hadoop.ozone.protocolPB.StorageContainerDatanodeProtocolPB;
 import org.apache.hadoop.ozone.protocolPB
     .StorageContainerDatanodeProtocolServerSideTranslatorPB;
+import org.apache.hadoop.test.GenericTestUtils;

 import java.io.IOException;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.ServerSocket;

+import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+
 /**
  * Test Endpoint class.
  */
@@ -109,8 +113,13 @@ public final class SCMTestUtils {
     }
   }

-  public static Configuration getConf() {
-    return new Configuration();
+  public static OzoneConfiguration getConf() {
+    OzoneConfiguration conf = new OzoneConfiguration();
+    conf.set(HDDS_DATANODE_DIR_KEY, GenericTestUtils
+        .getRandomizedTempPath());
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS, GenericTestUtils
+        .getRandomizedTempPath());
+    return conf;
   }

   public static OzoneConfiguration getOzoneConf() {

+ 4 - 0
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/ScmTestMock.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.NodeReportProto;
 import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.StorageReportProto;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.StorageContainerDatanodeProtocol;
 import org.apache.hadoop.ozone.protocol.VersionResponse;

@@ -151,7 +152,10 @@ public class ScmTestMock implements StorageContainerDatanodeProtocol {
     return VersionResponse.newBuilder()
         .setVersion(versionInfo.getVersion())
         .addValue(VersionInfo.DESCRIPTION_KEY, versionInfo.getDescription())
+        .addValue(OzoneConsts.SCM_ID, UUID.randomUUID().toString())
+        .addValue(OzoneConsts.CLUSTER_ID, UUID.randomUUID().toString())
         .build().getProtobufMessage();
+
   }

   private void sleepIfNeeded() {

+ 1 - 2
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/TestKeyValueContainerData.java

@@ -42,8 +42,7 @@ public class TestKeyValueContainerData {
         .ContainerLifeCycleState.CLOSED;
     AtomicLong val = new AtomicLong(0);

-    KeyValueContainerData kvData = new KeyValueContainerData(containerType,
-        containerId);
+    KeyValueContainerData kvData = new KeyValueContainerData(containerId);

     assertEquals(containerType, kvData.getContainerType());
     assertEquals(containerId, kvData.getContainerId());

+ 14 - 11
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestKeyValueYaml.java → hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerDataYaml.java

@@ -22,7 +22,6 @@ import org.apache.hadoop.fs.FileSystemTestHelper;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
 import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
-import org.apache.hadoop.ozone.container.keyvalue.KeyValueYaml;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.junit.Test;

@@ -36,7 +35,7 @@ import static org.junit.Assert.fail;
 /**
  * This class tests create/read .container files.
  */
-public class TestKeyValueYaml {
+public class TestContainerDataYaml {

   @Test
   public void testCreateContainerFile() throws IOException {
@@ -46,8 +45,7 @@ public class TestContainerDataYaml {
     File filePath = new File(new FileSystemTestHelper().getTestRootDir());
     filePath.mkdirs();

-    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(
-        ContainerProtos.ContainerType.KeyValueContainer, Long.MAX_VALUE);
+    KeyValueContainerData keyValueContainerData = new KeyValueContainerData(Long.MAX_VALUE);
     keyValueContainerData.setContainerDBType("RocksDB");
     keyValueContainerData.setMetadataPath(path);
     keyValueContainerData.setChunksPath(path);
@@ -55,14 +53,15 @@
     File containerFile = new File(filePath, containerPath);

     // Create .container file with ContainerData
-    KeyValueYaml.createContainerFile(containerFile, keyValueContainerData);
+    ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
+            .KeyValueContainer, containerFile, keyValueContainerData);

     //Check .container file exists or not.
     assertTrue(containerFile.exists());

     // Read from .container file, and verify data.
-    KeyValueContainerData kvData = KeyValueYaml.readContainerFile(
-        containerFile);
+    KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+        .readContainerFile(containerFile);
     assertEquals(Long.MAX_VALUE, kvData.getContainerId());
     assertEquals(ContainerProtos.ContainerType.KeyValueContainer, kvData
         .getContainerType());
@@ -82,10 +81,12 @@

     // Update .container file with new ContainerData.
     containerFile = new File(filePath, containerPath);
-    KeyValueYaml.createContainerFile(containerFile, kvData);
+    ContainerDataYaml.createContainerFile(ContainerProtos.ContainerType
+            .KeyValueContainer, containerFile, kvData);

     // Reading newly updated data from .container file
-    kvData =  KeyValueYaml.readContainerFile(containerFile);
+    kvData =  (KeyValueContainerData) ContainerDataYaml.readContainerFile(
+        containerFile);

     // verify data.
     assertEquals(Long.MAX_VALUE, kvData.getContainerId());
@@ -113,7 +114,8 @@
       //Get file from resources folder
       ClassLoader classLoader = getClass().getClassLoader();
       File file = new File(classLoader.getResource(path).getFile());
-      KeyValueContainerData kvData = KeyValueYaml.readContainerFile(file);
+      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+          .readContainerFile(file);
       fail("testIncorrectContainerFile failed");
     } catch (IllegalStateException ex) {
       GenericTestUtils.assertExceptionContains("Unexpected " +
@@ -135,7 +137,8 @@
       //Get file from resources folder
       ClassLoader classLoader = getClass().getClassLoader();
       File file = new File(classLoader.getResource(path).getFile());
-      KeyValueContainerData kvData = KeyValueYaml.readContainerFile(file);
+      KeyValueContainerData kvData = (KeyValueContainerData) ContainerDataYaml
+          .readContainerFile(file);

       //Checking the Container file data is consistent or not
       assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED, kvData

+ 2 - 4
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerSet.java

@@ -53,8 +53,7 @@ public class TestContainerSet {
     ContainerProtos.ContainerLifeCycleState state = ContainerProtos
         .ContainerLifeCycleState.CLOSED;

-    KeyValueContainerData kvData = new KeyValueContainerData(
-        ContainerProtos.ContainerType.KeyValueContainer, containerId);
+    KeyValueContainerData kvData = new KeyValueContainerData(containerId);
     kvData.setState(state);
     KeyValueContainer keyValueContainer = new KeyValueContainer(kvData, new
         OzoneConfiguration());
@@ -164,8 +163,7 @@
   private ContainerSet createContainerSet() throws StorageContainerException {
     ContainerSet containerSet = new ContainerSet();
     for (int i=0; i<10; i++) {
-      KeyValueContainerData kvData = new KeyValueContainerData(
-          ContainerProtos.ContainerType.KeyValueContainer, i);
+      KeyValueContainerData kvData = new KeyValueContainerData(i);
       if (i%2 == 0) {
         kvData.setState(ContainerProtos.ContainerLifeCycleState.CLOSED);
       } else {

+ 3 - 5
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/interfaces/TestHandler.java

@@ -52,7 +52,6 @@ public class TestHandler {
   private VolumeSet volumeSet;
   private Handler handler;

-  private final static String SCM_ID = UUID.randomUUID().toString();
   private final static String DATANODE_UUID = UUID.randomUUID().toString();

   @Before
@@ -61,12 +60,12 @@
     this.containerSet = Mockito.mock(ContainerSet.class);
     this.volumeSet = Mockito.mock(VolumeSet.class);

-    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, SCM_ID);
+    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
   }

   @Test
   public void testGetKeyValueHandler() throws Exception {
-    Handler kvHandler = dispatcher.getHandlerForContainerType(
+    Handler kvHandler = dispatcher.getHandler(
         ContainerProtos.ContainerType.KeyValueContainer);

     Assert.assertTrue("getHandlerForContainerType returned incorrect handler",
@@ -83,8 +82,7 @@
     Assert.assertEquals("New ContainerType detected. Not an invalid " +
         "containerType", invalidContainerType, null);

-    Handler handler = dispatcher.getHandlerForContainerType(
-        invalidContainerType);
+    Handler handler = dispatcher.getHandler(invalidContainerType);
     Assert.assertEquals("Get Handler for Invalid ContainerType should " +
         "return null.", handler, null);
   }

+ 1 - 2
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestChunkManagerImpl.java

@@ -81,8 +81,7 @@ public class TestChunkManagerImpl {
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);

-    keyValueContainerData = new KeyValueContainerData(
-        ContainerProtos.ContainerType.KeyValueContainer, 1L);
+    keyValueContainerData = new KeyValueContainerData(1L);

     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, config);

+ 1 - 2
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyManagerImpl.java

@@ -79,8 +79,7 @@ public class TestKeyManagerImpl {
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);

-    keyValueContainerData = new KeyValueContainerData(
-        ContainerProtos.ContainerType.KeyValueContainer, 1L);
+    keyValueContainerData = new KeyValueContainerData(1L);

     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, config);

+ 6 - 4
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueContainer.java

@@ -24,6 +24,7 @@ import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;

 import org.apache.hadoop.hdds.scm.container.common.helpers
     .StorageContainerException;
+import org.apache.hadoop.ozone.container.common.impl.ContainerDataYaml;
 import org.apache.hadoop.ozone.container.common.volume.HddsVolume;
 import org.apache.hadoop.ozone.container.common.volume
     .RoundRobinVolumeChoosingPolicy;
@@ -85,8 +86,7 @@
     Mockito.when(volumeChoosingPolicy.chooseVolume(anyList(), anyLong()))
         .thenReturn(hddsVolume);

-    keyValueContainerData = new KeyValueContainerData(
-        ContainerProtos.ContainerType.KeyValueContainer, 1L);
+    keyValueContainerData = new KeyValueContainerData(1L);

     keyValueContainer = new KeyValueContainer(
         keyValueContainerData, conf);
@@ -197,7 +197,8 @@
     File containerFile = KeyValueContainerLocationUtil.getContainerFile(
         containerMetaDataLoc, containerName);

-    keyValueContainerData = KeyValueYaml.readContainerFile(containerFile);
+    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
+        .readContainerFile(containerFile);
     assertEquals(ContainerProtos.ContainerLifeCycleState.CLOSED,
         keyValueContainerData.getState());
   }
@@ -237,7 +238,8 @@
     File containerFile = KeyValueContainerLocationUtil.getContainerFile(
         containerMetaDataLoc, containerName);

-    keyValueContainerData = KeyValueYaml.readContainerFile(containerFile);
+    keyValueContainerData = (KeyValueContainerData) ContainerDataYaml
+        .readContainerFile(containerFile);
     assertEquals(2, keyValueContainerData.getMetadata().size());

   }

+ 4 - 4
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/keyvalue/TestKeyValueHandler.java

@@ -74,9 +74,10 @@ public class TestKeyValueHandler {
         .build();
     this.volumeSet = new VolumeSet(datanodeDetails, conf);

-    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet, SCM_ID);
-    this.handler = (KeyValueHandler) dispatcher.getHandlerForContainerType(
+    this.dispatcher = new HddsDispatcher(conf, containerSet, volumeSet);
+    this.handler = (KeyValueHandler) dispatcher.getHandler(
         ContainerProtos.ContainerType.KeyValueContainer);
+    dispatcher.setScmId(UUID.randomUUID().toString());
   }

   @Test
@@ -87,8 +88,7 @@
     // Create mock HddsDispatcher and KeyValueHandler.
     this.handler = Mockito.mock(KeyValueHandler.class);
     this.dispatcher = Mockito.mock(HddsDispatcher.class);
-    Mockito.when(dispatcher.getHandlerForContainerType(any())).thenReturn
-        (handler);
+    Mockito.when(dispatcher.getHandler(any())).thenReturn(handler);
     Mockito.when(dispatcher.dispatch(any())).thenCallRealMethod();
     Mockito.when(dispatcher.getContainer(anyLong())).thenReturn(
         Mockito.mock(KeyValueContainer.class));

+ 108 - 0
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java

@@ -0,0 +1,108 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ *  with the License.  You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ *  Unless required by applicable law or agreed to in writing, software
+ *  distributed under the License is distributed on an "AS IS" BASIS,
+ *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ *  See the License for the specific language governing permissions and
+ *  limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.container.ozoneimpl;
+
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.DatanodeDetails;
+import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
+import org.apache.hadoop.hdds.scm.ScmConfigKeys;
+import org.apache.hadoop.ozone.OzoneConfigKeys;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
+import org.apache.hadoop.ozone.container.common.volume.RoundRobinVolumeChoosingPolicy;
+import org.apache.hadoop.ozone.container.common.volume.VolumeSet;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.junit.Before;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+import java.util.Random;
+import java.util.UUID;
+
+
+import static org.junit.Assert.assertEquals;
+
+/**
+ * This class is used to test OzoneContainer.
+ */
+public class TestOzoneContainer {
+
+  @Rule
+  public TemporaryFolder folder = new TemporaryFolder();
+
+
+  private OzoneConfiguration conf;
+  private String scmId = UUID.randomUUID().toString();
+  private VolumeSet volumeSet;
+  private RoundRobinVolumeChoosingPolicy volumeChoosingPolicy;
+  private KeyValueContainerData keyValueContainerData;
+  private KeyValueContainer keyValueContainer;
+  private final DatanodeDetails datanodeDetails = createDatanodeDetails();
+
+  @Before
+  public void setUp() throws Exception {
+    conf = new OzoneConfiguration();
+    conf.set(ScmConfigKeys.HDDS_DATANODE_DIR_KEY, folder.getRoot()
+        .getAbsolutePath() + "," + folder.newFolder().getAbsolutePath());
+    conf.set(OzoneConfigKeys.OZONE_METADATA_DIRS,
+        folder.newFolder().getAbsolutePath());
+    volumeSet = new VolumeSet(datanodeDetails, conf);
+    volumeChoosingPolicy = new RoundRobinVolumeChoosingPolicy();
+
+    for (int i = 0; i < 10; i++) {
+      keyValueContainerData = new KeyValueContainerData(i);
+      keyValueContainer = new KeyValueContainer(
+          keyValueContainerData, conf);
+      keyValueContainer.create(volumeSet, volumeChoosingPolicy, scmId);
+    }
+  }
+
+  @Test
+  public void testBuildContainerMap() throws Exception {
+    OzoneContainer ozoneContainer =
+        new OzoneContainer(datanodeDetails, conf);
+    ContainerSet containerSet = ozoneContainer.getContainerSet();
+    assertEquals(10, containerSet.containerCount());
+  }
+
+
+  private DatanodeDetails createDatanodeDetails() {
+    Random random = new Random();
+    String ipAddress =
+        random.nextInt(256) + "." + random.nextInt(256) + "." + random
+            .nextInt(256) + "." + random.nextInt(256);
+
+    String uuid = UUID.randomUUID().toString();
+    DatanodeDetails.Port containerPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.STANDALONE, 0);
+    DatanodeDetails.Port ratisPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.RATIS, 0);
+    DatanodeDetails.Port restPort = DatanodeDetails.newPort(
+        DatanodeDetails.Port.Name.REST, 0);
+    DatanodeDetails.Builder builder = DatanodeDetails.newBuilder();
+    builder.setUuid(uuid)
+        .setHostName("localhost")
+        .setIpAddress(ipAddress)
+        .addPort(containerPort)
+        .addPort(ratisPort)
+        .addPort(restPort);
+    return builder.build();
+  }
+}

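This new unit test seeds ten KeyValue containers on disk and then asserts that a freshly constructed OzoneContainer rediscovers all of them, verifying that the container map is rebuilt from the data volumes at startup rather than held only in memory.
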
+ 4 - 0
hadoop-hdds/server-scm/src/main/java/org/apache/hadoop/hdds/scm/node/SCMNodeManager.java

@@ -40,6 +40,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.SCMVersionRequestProto;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.util.MBeans;
+import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.protocol.StorageContainerNodeProtocol;
 import org.apache.hadoop.ozone.protocol.VersionResponse;
 import org.apache.hadoop.ozone.protocol.commands.RegisteredCommand;
@@ -703,6 +704,9 @@ public class SCMNodeManager
   public VersionResponse getVersion(SCMVersionRequestProto versionRequest) {
     return VersionResponse.newBuilder()
         .setVersion(this.version.getVersion())
+        .addValue(OzoneConsts.SCM_ID, this.scmManager.getScmStorage().getScmId())
+        .addValue(OzoneConsts.CLUSTER_ID, this.scmManager.getScmStorage()
+            .getClusterID())
         .build();
   }
 

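With this change the VERSION handshake carries the SCM's identity: the response now includes the scmId and clusterId as key/value pairs. A sketch of the enriched response, with versionInfo, scmId and clusterId standing in for the SCM's own fields:

    VersionResponse response = VersionResponse.newBuilder()
        .setVersion(versionInfo.getVersion())
        .addValue(OzoneConsts.SCM_ID, scmId)
        .addValue(OzoneConsts.CLUSTER_ID, clusterId)
        .build();
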
+ 14 - 6
hadoop-hdds/server-scm/src/test/java/org/apache/hadoop/ozone/container/common/TestEndPoint.java

@@ -20,6 +20,7 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.commons.lang3.RandomUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.hdds.protocol.proto.StorageContainerDatanodeProtocolProtos;
 import org.apache.hadoop.hdds.scm.TestUtils;
 import org.apache.hadoop.hdds.scm.VersionInfo;
@@ -125,12 +126,14 @@ public class TestEndPoint {
    * how the state machine would make the call.
    */
   public void testGetVersionTask() throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
+    OzoneConfiguration conf = SCMTestUtils.getConf();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, 1000)) {
+      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
+          conf);
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
       VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
-          conf);
+          conf, ozoneContainer);
       EndpointStateMachine.EndPointStates newState = versionTask.call();
 
       // if version call worked the endpoint should automatically move to the
@@ -149,14 +152,16 @@ public class TestEndPoint {
    * expect that versionTask should be able to handle it.
    */
   public void testGetVersionToInvalidEndpoint() throws Exception {
-    Configuration conf = SCMTestUtils.getConf();
+    OzoneConfiguration conf = SCMTestUtils.getConf();
     InetSocketAddress nonExistentServerAddress = SCMTestUtils
         .getReuseableAddress();
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         nonExistentServerAddress, 1000)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
+      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
           conf);
+      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
+          conf, ozoneContainer);
       EndpointStateMachine.EndPointStates newState = versionTask.call();
 
       // This version call did NOT work, so endpoint should remain in the same
@@ -175,13 +180,15 @@ public class TestEndPoint {
   public void testGetVersionAssertRpcTimeOut() throws Exception {
     final long rpcTimeout = 1000;
     final long tolerance = 100;
-    Configuration conf = SCMTestUtils.getConf();
+    OzoneConfiguration conf = SCMTestUtils.getConf();
 
     try (EndpointStateMachine rpcEndPoint = createEndpoint(conf,
         serverAddress, (int) rpcTimeout)) {
       rpcEndPoint.setState(EndpointStateMachine.EndPointStates.GETVERSION);
-      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
+      OzoneContainer ozoneContainer = new OzoneContainer(getDatanodeDetails(),
           conf);
+      VersionEndpointTask versionTask = new VersionEndpointTask(rpcEndPoint,
+          conf, ozoneContainer);
 
       scmServerImpl.setRpcResponseDelay(1500);
       long start = Time.monotonicNow();
@@ -386,4 +393,5 @@ public class TestEndPoint {
     }
     return reportsBuilder.build();
   }
+
 }

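These test updates reflect the new VersionEndpointTask contract: the task now receives the OzoneContainer so the scmId and clusterId returned by the SCM can be applied to it. A sketch of the construction, assuming datanodeDetails, conf and rpcEndPoint are in scope:

    OzoneContainer ozoneContainer =
        new OzoneContainer(datanodeDetails, conf);
    VersionEndpointTask versionTask =
        new VersionEndpointTask(rpcEndPoint, conf, ozoneContainer);
    EndpointStateMachine.EndPointStates newState = versionTask.call();
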
+ 7 - 4
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/TestStorageContainerManagerHelper.java

@@ -27,8 +27,10 @@ import org.apache.hadoop.hdds.scm.container.common.helpers.ContainerInfo;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.datanode.ObjectStoreHandler;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.helpers.KeyUtils;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyInfo;
@@ -163,8 +165,9 @@ public class TestStorageContainerManagerHelper {
     DatanodeDetails leadDN = container.getPipeline().getLeader();
     OzoneContainer containerServer =
         getContainerServerByDatanodeUuid(leadDN.getUuidString());
-    ContainerData containerData = containerServer.getContainerManager()
-        .readContainer(containerID);
+    KeyValueContainerData containerData = (KeyValueContainerData) containerServer
+        .getContainerSet()
+        .getContainer(containerID).getContainerData();
     return KeyUtils.getDB(containerData, conf);
   }
 

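The pattern above recurs throughout the integration tests: container metadata is now resolved through the ContainerSet instead of a ContainerManager. A sketch of the new read path (the MetadataStore return type of KeyUtils.getDB is assumed from the existing helper):

    KeyValueContainerData containerData = (KeyValueContainerData)
        containerServer.getContainerSet()
            .getContainer(containerID)
            .getContainerData();
    MetadataStore db = KeyUtils.getDB(containerData, conf);
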
+ 3 - 3
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerByPipeline.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.ozone.client.ObjectStore;
 import org.apache.hadoop.ozone.client.OzoneClient;
 import org.apache.hadoop.ozone.client.OzoneClientFactory;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
@@ -183,7 +183,7 @@ public class TestCloseContainerByPipeline {
     for (DatanodeDetails datanodeDetails : datanodes) {
       GenericTestUtils.waitFor(
           () -> isContainerClosed(cluster, containerID, datanodeDetails), 500,
-          5 * 1000);
+          15 * 1000);
       //double check if it's really closed (waitFor also throws an exception)
       Assert.assertTrue(isContainerClosed(cluster, containerID, datanodeDetails));
     }
@@ -204,7 +204,7 @@ public class TestCloseContainerByPipeline {
         if (datanode.equals(datanodeService.getDatanodeDetails())) {
           containerData =
               datanodeService.getDatanodeStateMachine().getContainer()
-                  .getContainerManager().readContainer(containerID);
+                  .getContainerSet().getContainer(containerID).getContainerData();
           if (!containerData.isOpen()) {
             // make sure the closeContainerHandler on the Datanode is invoked
             Assert.assertTrue(

+ 3 - 3
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/common/statemachine/commandhandler/TestCloseContainerHandler.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.hdds.client.ReplicationFactor;
 import org.apache.hadoop.hdds.client.ReplicationType;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.rest.OzoneException;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.ozone.protocol.commands.CloseContainerCommand;
@@ -104,8 +104,8 @@ public class TestCloseContainerHandler {
     ContainerData containerData;
     try {
       containerData = cluster.getHddsDatanodes().get(0)
-          .getDatanodeStateMachine().getContainer().getContainerManager()
-          .readContainer(containerID);
+          .getDatanodeStateMachine().getContainer().getContainerSet()
+          .getContainer(containerID).getContainerData();
       return !containerData.isOpen();
     } catch (StorageContainerException e) {
       throw new AssertionError(e);

+ 7 - 6
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java

@@ -35,10 +35,7 @@ import org.junit.Rule;
 import org.junit.Test;
 import org.junit.rules.Timeout;
 
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
+import java.util.UUID;
 import java.util.concurrent.CompletableFuture;
 
 /**
@@ -66,7 +63,11 @@ public class TestOzoneContainer {
       conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT, pipeline.getLeader()
               .getPort(DatanodeDetails.Port.Name.STANDALONE).getValue());
       conf.setBoolean(OzoneConfigKeys.DFS_CONTAINER_IPC_RANDOM_PORT, false);
-      container = new OzoneContainer(TestUtils.getDatanodeDetails(), conf);
+
+      container = new OzoneContainer(TestUtils.getDatanodeDetails(),
+          conf);
+      // Set the scmId manually, since the OzoneContainer is started by hand.
+      container.getDispatcher().setScmId(UUID.randomUUID().toString());
       container.start();
 
       XceiverClient client = new XceiverClient(pipeline, conf);
@@ -392,7 +393,7 @@ public class TestOzoneContainer {
       response = client.sendCommand(request);
 
       Assert.assertNotNull(response);
-      Assert.assertEquals(ContainerProtos.Result.UNCLOSED_CONTAINER_IO,
+      Assert.assertEquals(ContainerProtos.Result.DELETE_ON_OPEN_CONTAINER,
           response.getResult());
       Assert.assertTrue(request.getTraceID().equals(response.getTraceID()));
 

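Two behavioral notes surface in this hunk: a manually started OzoneContainer never runs the VERSION handshake, so the test must seed the dispatcher's scmId itself, and deleting an open container now fails with DELETE_ON_OPEN_CONTAINER rather than UNCLOSED_CONTAINER_IO. A sketch of the manual start-up, assuming conf is prepared as above:

    OzoneContainer container =
        new OzoneContainer(TestUtils.getDatanodeDetails(), conf);
    // No VERSION handshake happens here, so seed the scmId by hand.
    container.getDispatcher().setScmId(UUID.randomUUID().toString());
    container.start();
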
+ 10 - 0
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/container/server/TestContainerServer.java

@@ -18,6 +18,7 @@
 
 package org.apache.hadoop.ozone.container.server;
 
+import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.ratis.shaded.io.netty.channel.embedded.EmbeddedChannel;
 import org.apache.hadoop.hdds.protocol.DatanodeDetails;
 import org.apache.hadoop.hdds.protocol.datanode.proto.ContainerProtos;
@@ -262,5 +263,14 @@ public class TestContainerServer {
     @Override
     public void shutdown() {
     }
+
+    @Override
+    public Handler getHandler(ContainerProtos.ContainerType containerType) {
+      return null;
+    }
+
+    @Override
+    public void setScmId(String scmId) {
+
+    }
   }
 }

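The stub dispatcher in this test tracks the widened ContainerDispatcher interface, which now exposes getHandler and setScmId; no-op overrides suffice here because the stub never routes real requests.
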
+ 7 - 7
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/ksm/TestContainerReportWithKeys.java

@@ -27,8 +27,8 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.client.*;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
+import org.apache.hadoop.ozone.container.common.impl.ContainerData;
+import org.apache.hadoop.ozone.container.common.impl.ContainerSet;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyLocationInfo;
 import org.apache.hadoop.hdds.scm.server.StorageContainerManager;
@@ -119,8 +119,8 @@ public class TestContainerReportWithKeys {
 
     ContainerData cd = getContainerData(keyInfo.getContainerID());
 
-    LOG.info("DN Container Data:  keyCount: {} used: {} ",
-        cd.getKeyCount(), cd.getBytesUsed());
+/*    LOG.info("DN Container Data:  keyCount: {} used: {} ",
+        cd.getKeyCount(), cd.getBytesUsed());*/
 
     ContainerInfo cinfo = scm.getContainerInfo(keyInfo.getContainerID());
 
@@ -132,9 +132,9 @@ public class TestContainerReportWithKeys {
   private static ContainerData getContainerData(long containerID) {
     ContainerData containerData;
     try {
-      ContainerManager containerManager = cluster.getHddsDatanodes().get(0)
-          .getDatanodeStateMachine().getContainer().getContainerManager();
-      containerData = containerManager.readContainer(containerID);
+      ContainerSet containerSet = cluster.getHddsDatanodes().get(0)
+          .getDatanodeStateMachine().getContainer().getContainerSet();
+      containerData = containerSet.getContainer(containerID)
+          .getContainerData();
     } catch (StorageContainerException e) {
       throw new AssertionError(e);
     }

+ 13 - 9
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java

@@ -44,9 +44,10 @@ import org.apache.hadoop.ozone.client.io.OzoneInputStream;
 import org.apache.hadoop.ozone.client.io.OzoneOutputStream;
 import org.apache.hadoop.ozone.client.protocol.ClientProtocol;
 import org.apache.hadoop.ozone.client.rpc.RpcClient;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
-import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainer;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueContainerData;
+import org.apache.hadoop.ozone.container.keyvalue.KeyValueHandler;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.ozone.ksm.KeySpaceManager;
 import org.apache.hadoop.ozone.ksm.helpers.KsmKeyArgs;
@@ -698,13 +699,16 @@ public class TestKeys {
         List<KsmKeyLocationInfo> locations =
             keyInfo.getLatestVersionLocations().getLocationList();
         for (KsmKeyLocationInfo location : locations) {
-          KeyData keyData = new KeyData(location.getBlockID());
-          KeyData blockInfo = cm.getContainerManager()
-              .getKeyManager().getKey(keyData);
-          ContainerData containerData = cm.getContainerManager()
-              .readContainer(keyData.getContainerID());
-          File dataDir = ContainerUtils
-              .getDataDirectory(containerData).toFile();
+          KeyValueHandler keyValueHandler = (KeyValueHandler) cm
+              .getDispatcher().getHandler(ContainerProtos.ContainerType
+                  .KeyValueContainer);
+          KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
+              .getContainer(location.getBlockID().getContainerID());
+          KeyData blockInfo = keyValueHandler
+              .getKeyManager().getKey(container, location.getBlockID());
+          KeyValueContainerData containerData = (KeyValueContainerData) container
+              .getContainerData();
+          File dataDir = new File(containerData.getChunksPath());
           for (ContainerProtos.ChunkInfo chunkInfo : blockInfo.getChunks()) {
             File chunkFile = dataDir.toPath()
                 .resolve(chunkInfo.getChunkName()).toFile();
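
The rewritten lookup goes through the container-type-specific handler: the KeyValueHandler is fetched from the dispatcher, the container is resolved from the ContainerSet, and the handler's KeyManager returns the block, whose chunk files live under the container's chunks path. A condensed sketch of that path (cm is the datanode's OzoneContainer, blockID the block being read):

    KeyValueHandler keyValueHandler = (KeyValueHandler) cm.getDispatcher()
        .getHandler(ContainerProtos.ContainerType.KeyValueContainer);
    KeyValueContainer container = (KeyValueContainer) cm.getContainerSet()
        .getContainer(blockID.getContainerID());
    KeyData blockInfo = keyValueHandler.getKeyManager()
        .getKey(container, blockID);
    File dataDir = new File(((KeyValueContainerData) container
        .getContainerData()).getChunksPath());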