
HDFS-11728. Ozone: add the DB names to OzoneConsts. Contributed by Chen Liang.

Weiwei Yang, 8 years ago
commit b581cde542

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java

@@ -64,7 +64,6 @@ public final class OzoneConsts {
   public static final String CONTAINER_DATA_PATH = "data";
   public static final String CONTAINER_ROOT_PREFIX = "repository";
 
-  public static final String CONTAINER_DB = "container.db";
   public static final String FILE_HASH = "SHA-256";
   public final static String CHUNK_OVERWRITE = "OverWriteRequested";
 
@@ -74,6 +73,14 @@ public final class OzoneConsts {
   public static final long GB = MB * 1024L;
   public static final long TB = GB * 1024L;
 
+  /**
+   * LevelDB names used by SCM and data nodes.
+   */
+  public static final String CONTAINER_DB = "container.db";
+  public static final String BLOCK_DB = "block.db";
+  public static final String NODEPOOL_DB = "nodepool.db";
+  public static final String OPEN_CONTAINERS_DB = "openContainers.db";
+
   /**
    * Supports Bucket Versioning.
    */

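For reference, a minimal sketch (not part of this patch) of how a caller can now resolve SCM metadata DB paths through the shared constants instead of repeating string literals. The DbPathExample class and the /tmp directory are illustrative placeholders; it assumes the patched OzoneConsts is on the classpath.

import java.io.File;

import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;

/** Sketch only: builds SCM metadata DB paths from the shared OzoneConsts names. */
public final class DbPathExample {
  private DbPathExample() { }

  public static void main(String[] args) {
    // Placeholder metadata directory; SCM derives the real one from its configuration.
    File scmMetaDataDir = new File("/tmp/scm-metadata");
    File blockDbPath = new File(scmMetaDataDir, BLOCK_DB);         // .../block.db
    File containerDbPath = new File(scmMetaDataDir, CONTAINER_DB); // .../container.db
    System.out.println(blockDbPath + ", " + containerDbPath);
  }
}

This mirrors the changes below, where BlockManagerImpl, ContainerMapping and SCMNodePoolManager drop their hard-coded "block.db", "openContainers.db", "container.db" and "nodepool.db" literals in favor of the same constants.
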
+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/block/BlockManagerImpl.java

@@ -47,6 +47,8 @@ import java.util.Random;
 import java.util.stream.Collectors;
 import java.util.UUID;
 
+import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.OPEN_CONTAINERS_DB;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException
     .ResultCodes.CHILL_MODE_EXCEPTION;
 import static org.apache.hadoop.ozone.scm.exceptions.SCMException
@@ -105,7 +107,7 @@ public class BlockManagerImpl implements BlockManager {
     options.createIfMissing();
 
     // Write the block key to container name mapping.
-    File blockContainerDbPath = new File(scmMetaDataDir, "block.db");
+    File blockContainerDbPath = new File(scmMetaDataDir, BLOCK_DB);
     blockStore = new LevelDBStore(blockContainerDbPath, options);
 
     this.containerSize = OzoneConsts.GB * conf.getInt(
@@ -113,7 +115,7 @@ public class BlockManagerImpl implements BlockManager {
         ScmConfigKeys.OZONE_SCM_CONTAINER_SIZE_DEFAULT);
 
     // Load store of all open containers for block allocation
-    File openContainsDbPath = new File(scmMetaDataDir, "openContainers.db");
+    File openContainsDbPath = new File(scmMetaDataDir, OPEN_CONTAINERS_DB);
     openContainerStore = new LevelDBStore(openContainsDbPath, options);
     openContainers = new HashMap<>();
     loadOpenContainers();

+ 3 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/container/ContainerMapping.java

@@ -43,6 +43,8 @@ import java.util.List;
 import java.util.concurrent.locks.Lock;
 import java.util.concurrent.locks.ReentrantLock;
 
+import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
+
 /**
  * Mapping class contains the mapping from a name to a pipeline mapping. This is
  * used by SCM when allocating new locations and when looking up a key.
@@ -88,7 +90,7 @@ public class ContainerMapping implements Mapping {
     options.createIfMissing();
 
     // Write the container name to pipeline mapping.
-    File containerDBPath = new File(scmMetaDataDir, "container.db");
+    File containerDBPath = new File(scmMetaDataDir, CONTAINER_DB);
     containerStore = new LevelDBStore(containerDBPath, options);
 
     this.lock = new ReentrantLock();

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/node/SCMNodePoolManager.java

@@ -43,6 +43,7 @@ import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 import java.util.stream.Collectors;
 
+import static org.apache.hadoop.ozone.OzoneConsts.NODEPOOL_DB;
 import static org.apache.hadoop.ozone.scm
     .exceptions.SCMException.ResultCodes.FAILED_TO_LOAD_NODEPOOL;
 import static org.apache.hadoop.ozone.scm
@@ -90,7 +91,7 @@ public final class SCMNodePoolManager implements NodePoolManager {
     options.cacheSize(cacheSize * OzoneConsts.MB);
     options.createIfMissing();
 
-    File nodePoolDBPath = new File(scmMetaDataDir, "nodepool.db");
+    File nodePoolDBPath = new File(scmMetaDataDir, NODEPOOL_DB);
     nodePoolStore = new LevelDBStore(nodePoolDBPath, options);
     nodePools = new HashMap<>();
     lock = new ReentrantReadWriteLock();