Browse Source

HDFS-10232. Ozone: Make config key naming consistent. Contributed by Anu Engineer.

Anu Engineer cách đây 9 năm
mục cha
commit
ce524c584b
20 tập tin đã thay đổi với 139 bổ sung và 145 xóa
  1. 7 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  2. 18 16
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java
  3. 29 29
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java
  4. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java
  5. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java
  6. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java
  7. 1 1
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java
  8. 13 11
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java
  9. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java
  10. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java
  11. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java
  12. 12 18
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java
  13. 2 2
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java
  14. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/storage/TestStorageContainerManager.java
  15. 3 3
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java
  16. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java
  17. 5 5
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java
  18. 7 7
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java
  19. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java
  20. 8 8
      hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java

+ 7 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -48,8 +48,8 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_DEF
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_MAX_NUM_BLOCKS_TO_LOG_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_DEFAULT;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_METRICS_LOGGER_PERIOD_SECONDS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
@@ -188,7 +188,6 @@ import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.DNS;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.unix.DomainSocket;
-import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.container.ozoneimpl.OzoneContainer;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslPropertiesResolver;
@@ -456,9 +455,8 @@ public class DataNode extends ReconfigurableBase
     this.pipelineSupportECN = conf.getBoolean(
         DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
         DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);
-    this.ozoneEnabled = conf.getBoolean(OzoneConfigKeys
-        .DFS_OBJECTSTORE_ENABLED_KEY, OzoneConfigKeys
-        .DFS_OBJECTSTORE_ENABLED_DEFAULT);
+    this.ozoneEnabled = conf.getBoolean(OZONE_ENABLED,
+        OZONE_ENABLED_DEFAULT);
 
     confVersion = "core-" +
         conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
@@ -1294,7 +1292,7 @@ public class DataNode extends ReconfigurableBase
     // global DN settings
     registerMXBean();
     initDataXceiver(conf);
-    initObjectStoreHandler(conf);
+    initObjectStoreHandler();
     startInfoServer(conf);
     pauseMonitor = new JvmPauseMonitor();
     pauseMonitor.init(conf);
@@ -1331,12 +1329,10 @@ public class DataNode extends ReconfigurableBase
    * Initializes the object store handler.  This must be called before
    * initialization of the HTTP server.
    *
-   * @param config configuration
    * @throws IOException if there is an I/O error
    */
-  private void initObjectStoreHandler(Configuration config) throws IOException {
-    if (config.getBoolean(DFS_OBJECTSTORE_ENABLED_KEY,
-        DFS_OBJECTSTORE_ENABLED_DEFAULT)) {
+  private void initObjectStoreHandler() throws IOException {
+    if (this.ozoneEnabled) {
       this.objectStoreHandler = new ObjectStoreHandler(conf);
       LOG.info("ozone is enabled.");
     }

+ 18 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/ObjectStoreHandler.java

@@ -17,14 +17,14 @@
  */
 package org.apache.hadoop.hdfs.server.datanode;
 
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_ADDRESS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_BIND_HOST_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_DEFAULT_PORT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_TRACE_ENABLED_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HANDLER_TYPE_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_ADDRESS_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_BIND_HOST_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_DEFAULT_PORT;
 import static com.sun.jersey.api.core.ResourceConfig.PROPERTY_CONTAINER_REQUEST_FILTERS;
 import static com.sun.jersey.api.core.ResourceConfig.FEATURE_TRACE;
 
@@ -77,12 +77,12 @@ public final class ObjectStoreHandler implements Closeable {
    * @throws IOException if there is an I/O error
    */
   public ObjectStoreHandler(Configuration conf) throws IOException {
-    String shType = conf.getTrimmed(DFS_STORAGE_HANDLER_TYPE_KEY,
-        DFS_STORAGE_HANDLER_TYPE_DEFAULT);
+    String shType = conf.getTrimmed(OZONE_HANDLER_TYPE_KEY,
+        OZONE_HANDLER_TYPE_DEFAULT);
     LOG.info("ObjectStoreHandler initializing with {}: {}",
-        DFS_STORAGE_HANDLER_TYPE_KEY, shType);
-    boolean ozoneTrace = conf.getBoolean(DFS_OBJECTSTORE_TRACE_ENABLED_KEY,
-        DFS_OBJECTSTORE_TRACE_ENABLED_DEFAULT);
+        OZONE_HANDLER_TYPE_KEY, shType);
+    boolean ozoneTrace = conf.getBoolean(OZONE_TRACE_ENABLED_KEY,
+        OZONE_TRACE_ENABLED_DEFAULT);
     final StorageHandler storageHandler;
 
     // Initialize Jersey container for object store web application.
@@ -92,8 +92,10 @@ public final class ObjectStoreHandler implements Closeable {
       long version =
           RPC.getProtocolVersion(StorageContainerLocationProtocolPB.class);
       InetSocketAddress address = conf.getSocketAddr(
-          DFS_STORAGE_RPC_BIND_HOST_KEY, DFS_STORAGE_RPC_ADDRESS_KEY,
-          DFS_STORAGE_RPC_ADDRESS_DEFAULT, DFS_STORAGE_RPC_DEFAULT_PORT);
+          DFS_CONTAINER_LOCATION_RPC_BIND_HOST_KEY,
+          DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY,
+          DFS_CONTAINER_LOCATION_RPC_ADDRESS_DEFAULT,
+          DFS_CONTAINER_LOCATION_RPC_DEFAULT_PORT);
       this.storageContainerLocationClient =
           new StorageContainerLocationProtocolClientSideTranslatorPB(
               RPC.getProxy(StorageContainerLocationProtocolPB.class, version,
@@ -108,7 +110,7 @@ public final class ObjectStoreHandler implements Closeable {
       } else {
         throw new IllegalArgumentException(
             String.format("Unrecognized value for %s: %s",
-                DFS_STORAGE_HANDLER_TYPE_KEY, shType));
+                OZONE_HANDLER_TYPE_KEY, shType));
       }
     }
     ApplicationAdapter aa =

+ 29 - 29
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/OzoneConfigKeys.java

@@ -25,37 +25,37 @@ import org.apache.hadoop.classification.InterfaceAudience;
  */
 @InterfaceAudience.Private
 public final class OzoneConfigKeys {
-  public static final String DFS_OZONE_CONTAINER_IPC_PORT =
-      "dfs.ozone.container.ipc";
-  public static final int DFS_OZONE_CONTAINER_IPC_PORT_DEFAULT =  50011;
-  public static final String DFS_STORAGE_LOCAL_ROOT =
-      "dfs.ozone.localstorage.root";
-  public static final String DFS_STORAGE_LOCAL_ROOT_DEFAULT = "/tmp/ozone";
-  public static final String DFS_OBJECTSTORE_ENABLED_KEY =
-      "dfs.objectstore.enabled";
-  public static final boolean DFS_OBJECTSTORE_ENABLED_DEFAULT = false;
-  public static final String DFS_STORAGE_HANDLER_TYPE_KEY =
-      "dfs.storage.handler.type";
-  public static final String DFS_STORAGE_HANDLER_TYPE_DEFAULT = "distributed";
-  public static final String DFS_STORAGE_RPC_ADDRESS_KEY =
-      "dfs.storage.rpc-address";
-  public static final int DFS_STORAGE_RPC_DEFAULT_PORT = 50200;
-  public static final String DFS_STORAGE_RPC_ADDRESS_DEFAULT =
-      "0.0.0.0:" + DFS_STORAGE_RPC_DEFAULT_PORT;
-  public static final String DFS_STORAGE_RPC_BIND_HOST_KEY =
-      "dfs.storage.rpc-bind-host";
-  public static final String DFS_STORAGE_HANDLER_COUNT_KEY =
-      "dfs.storage.handler.count";
-  public static final int DFS_STORAGE_HANDLER_COUNT_DEFAULT = 10;
-  public static final String DFS_OBJECTSTORE_TRACE_ENABLED_KEY =
-      "dfs.objectstore.trace.enabled";
-  public static final boolean DFS_OBJECTSTORE_TRACE_ENABLED_DEFAULT = false;
+  public static final String DFS_CONTAINER_IPC_PORT =
+      "dfs.container.ipc";
+  public static final int DFS_CONTAINER_IPC_PORT_DEFAULT =  50011;
+  public static final String OZONE_LOCALSTORAGE_ROOT =
+      "ozone.localstorage.root";
+  public static final String OZONE_LOCALSTORAGE_ROOT_DEFAULT = "/tmp/ozone";
+  public static final String OZONE_ENABLED =
+      "ozone.enabled";
+  public static final boolean OZONE_ENABLED_DEFAULT = false;
+  public static final String OZONE_HANDLER_TYPE_KEY =
+      "ozone.handler.type";
+  public static final String OZONE_HANDLER_TYPE_DEFAULT = "distributed";
+  public static final String DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY =
+      "dfs.container.location.rpc-address";
+  public static final int DFS_CONTAINER_LOCATION_RPC_DEFAULT_PORT = 50200;
+  public static final String DFS_CONTAINER_LOCATION_RPC_ADDRESS_DEFAULT =
+      "0.0.0.0:" + DFS_CONTAINER_LOCATION_RPC_DEFAULT_PORT;
+  public static final String DFS_CONTAINER_LOCATION_RPC_BIND_HOST_KEY =
+      "dfs.container.rpc-bind-host";
+  public static final String DFS_CONTAINER_LOCATION_HANDLER_COUNT_KEY =
+      "dfs.container.handler.count";
+  public static final int DFS_CONTAINER_HANDLER_COUNT_DEFAULT = 10;
+  public static final String OZONE_TRACE_ENABLED_KEY =
+      "ozone.trace.enabled";
+  public static final boolean OZONE_TRACE_ENABLED_DEFAULT = false;
 
-  public static final String DFS_OZONE_METADATA_DIRS =
-      "dfs.ozone.metadata.dirs";
+  public static final String OZONE_METADATA_DIRS =
+      "ozone.metadata.dirs";
 
-  public static final String DFS_OZONE_KEY_CACHE = "dfs.ozone.key.cache.size";
-  public static final int DFS_OZONE_KEY_CACHE_DEFAULT = 1024;
+  public static final String OZONE_KEY_CACHE = "ozone.key.cache.size";
+  public static final int OZONE_KEY_CACHE_DEFAULT = 1024;
 
 
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/KeyManagerImpl.java

@@ -50,8 +50,8 @@ public class KeyManagerImpl implements KeyManager {
   public KeyManagerImpl(ContainerManager containerManager, Configuration conf) {
     Preconditions.checkNotNull(containerManager);
     Preconditions.checkNotNull(conf);
-    int cacheSize = conf.getInt(OzoneConfigKeys.DFS_OZONE_KEY_CACHE,
-        OzoneConfigKeys.DFS_OZONE_KEY_CACHE_DEFAULT);
+    int cacheSize = conf.getInt(OzoneConfigKeys.OZONE_KEY_CACHE,
+        OzoneConfigKeys.OZONE_KEY_CACHE_DEFAULT);
     this.containerManager = containerManager;
     containerCache = new ContainerCache(cacheSize, LOAD_FACTOR, true);
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/client/XceiverClient.java

@@ -83,8 +83,8 @@ public class XceiverClient implements Closeable {
     // port.
     int port = leader.getContainerPort();
     if (port == 0) {
-      port = config.getInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
-          OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT_DEFAULT);
+      port = config.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+          OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     }
     LOG.debug("Connecting to server Port : " + port);
     channelFuture = b.connect(leader.getHostName(), port).sync();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/transport/server/XceiverServer.java

@@ -50,8 +50,8 @@ public final class XceiverServer {
   public XceiverServer(Configuration conf,
                        ContainerDispatcher dispatcher) {
     Preconditions.checkNotNull(conf);
-    this.port = conf.getInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
-        OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT_DEFAULT);
+    this.port = conf.getInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
+        OzoneConfigKeys.DFS_CONTAINER_IPC_PORT_DEFAULT);
     this.storageContainer = dispatcher;
   }
 

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java

@@ -66,7 +66,7 @@ public class OzoneContainer {
       Exception {
     List<Path> locations = new LinkedList<>();
     String[] paths = ozoneConfig.getStrings(OzoneConfigKeys
-        .DFS_OZONE_METADATA_DIRS);
+        .OZONE_METADATA_DIRS);
     if (paths != null && paths.length > 0) {
       for (String p : paths) {
         locations.add(Paths.get(p));

+ 13 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/storage/StorageContainerManager.java

@@ -27,11 +27,11 @@ import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_HANDLER_COUNT_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_BIND_HOST_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_HANDLER_COUNT_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_ADDRESS_DEFAULT;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_BIND_HOST_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_ADDRESS_DEFAULT;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_BIND_HOST_KEY;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
@@ -190,16 +190,18 @@ public class StorageContainerManager
             new StorageContainerLocationProtocolServerSideTranslatorPB(this));
 
     InetSocketAddress storageRpcAddr = NetUtils.createSocketAddr(
-        conf.getTrimmed(DFS_STORAGE_RPC_ADDRESS_KEY,
-            DFS_STORAGE_RPC_ADDRESS_DEFAULT), -1, DFS_STORAGE_RPC_ADDRESS_KEY);
+        conf.getTrimmed(DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY,
+            DFS_CONTAINER_LOCATION_RPC_ADDRESS_DEFAULT),
+        -1, DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY);
 
     storageRpcServer = startRpcServer(conf, storageRpcAddr,
         StorageContainerLocationProtocolPB.class, storageProtoPbService,
-        DFS_STORAGE_RPC_BIND_HOST_KEY,
-        DFS_STORAGE_HANDLER_COUNT_KEY,
-        DFS_STORAGE_HANDLER_COUNT_DEFAULT);
+        DFS_CONTAINER_LOCATION_RPC_BIND_HOST_KEY,
+        DFS_CONTAINER_LOCATION_HANDLER_COUNT_KEY,
+        DFS_CONTAINER_HANDLER_COUNT_DEFAULT);
     storageRpcAddress = updateListenAddress(conf,
-        DFS_STORAGE_RPC_ADDRESS_KEY, storageRpcAddr, storageRpcServer);
+        DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY,
+        storageRpcAddr, storageRpcServer);
     LOG.info(buildRpcServerStartMessage(
         "StorageContainerLocationProtocol RPC server", storageRpcAddress));
   }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java

@@ -146,8 +146,8 @@ public final class OzoneMetadataManager {
 
     lock = new ReentrantReadWriteLock();
     storageRoot =
-        conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-            OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+        conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+            OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
 
     File file = new File(storageRoot + OBJECT_DIR);
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/MiniOzoneCluster.java

@@ -18,7 +18,7 @@
 package org.apache.hadoop.ozone;
 
 import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY;
-import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_STORAGE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.ozone.OzoneConfigKeys.DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY;
 
 import java.io.Closeable;
 import java.io.IOException;
@@ -109,7 +109,7 @@ public class MiniOzoneCluster extends MiniDFSCluster implements Closeable {
       // MiniDFSCluster expects to find the default file system configured with
       // an HDFS URI.
       conf.set(FS_DEFAULT_NAME_KEY, "hdfs://127.0.0.1:0");
-      conf.set(DFS_STORAGE_RPC_ADDRESS_KEY, "127.0.0.1:0");
+      conf.set(DFS_CONTAINER_LOCATION_RPC_ADDRESS_KEY, "127.0.0.1:0");
       StorageContainerManager scm = new StorageContainerManager(conf);
       scm.start();
       return new MiniOzoneCluster(this, scm);

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java

@@ -88,11 +88,11 @@ public class TestContainerPersistence {
     URL p = conf.getClass().getResource("");
     path = p.getPath().concat(
         TestContainerPersistence.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
 
     File containerDir = new File(path);
     if (containerDir.exists()) {

+ 12 - 18
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/ozoneimpl/TestOzoneContainer.java

@@ -18,7 +18,6 @@
 
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
-import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
@@ -26,16 +25,11 @@ import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.container.ContainerTestHelper;
 import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
-import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
 import org.apache.hadoop.ozone.container.common.transport.client.XceiverClient;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
-import org.junit.AfterClass;
 import org.junit.Assert;
-import org.junit.BeforeClass;
 import org.junit.Test;
 
-import java.io.File;
-import java.io.IOException;
 import java.net.URL;
 
 public class TestOzoneContainer {
@@ -46,14 +40,14 @@ public class TestOzoneContainer {
     URL p = conf.getClass().getResource("");
     String path = p.getPath().concat(
         TestOzoneContainer.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
 
     // We don't start Ozone Container via data node, we will do it
     // independently in our test path.
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, false);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, false);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
@@ -61,7 +55,7 @@ public class TestOzoneContainer {
 
     Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline
         (containerName);
-    conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
         pipeline.getLeader().getContainerPort());
     OzoneContainer container = new OzoneContainer(conf, cluster.getDataNodes
         ().get(0).getFSDataset());
@@ -88,17 +82,17 @@ public class TestOzoneContainer {
     URL p = conf.getClass().getResource("");
     String path = p.getPath().concat(
         TestOzoneContainer.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
 
     // Start ozone container Via Datanode create.
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
 
     Pipeline pipeline =
         ContainerTestHelper.createSingleNodePipeline(containerName);
-    conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+    conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
         pipeline.getLeader().getContainerPort());
 
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/transport/server/TestContainerServer.java

@@ -72,7 +72,7 @@ public class TestContainerServer {
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline
           (containerName);
       OzoneConfiguration conf = new OzoneConfiguration();
-      conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           pipeline.getLeader().getContainerPort());
 
       server = new XceiverServer(conf, new TestContainerDispatcher());
@@ -105,7 +105,7 @@ public class TestContainerServer {
       Pipeline pipeline = ContainerTestHelper.createSingleNodePipeline
           (containerName);
       OzoneConfiguration conf = new OzoneConfiguration();
-      conf.setInt(OzoneConfigKeys.DFS_OZONE_CONTAINER_IPC_PORT,
+      conf.setInt(OzoneConfigKeys.DFS_CONTAINER_IPC_PORT,
           pipeline.getLeader().getContainerPort());
 
       server = new XceiverServer(conf, new Dispatcher(

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/storage/TestStorageContainerManager.java

@@ -47,9 +47,9 @@ public class TestStorageContainerManager {
   @BeforeClass
   public static void init() throws IOException {
     conf = new OzoneConfiguration();
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "distributed");
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_KEY, true);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "distributed");
+    conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, true);
   }
 
   @After

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneRestWithMiniCluster.java

@@ -54,9 +54,9 @@ public class TestOzoneRestWithMiniCluster {
   @BeforeClass
   public static void init() throws Exception {
     conf = new OzoneConfiguration();
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "distributed");
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_KEY, true);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "distributed");
+    conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, true);
     cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitOzoneReady();
     ozoneClient = cluster.createOzoneClient();

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneVolumes.java

@@ -55,8 +55,8 @@ public class TestOzoneVolumes {
   /**
    * Create a MiniDFSCluster for testing.
    * <p>
-   * Ozone is made active by setting DFS_OBJECTSTORE_ENABLED_KEY = true and
-   * DFS_STORAGE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
    * emulate Ozone backend.
    *
    * @throws IOException
@@ -67,13 +67,13 @@ public class TestOzoneVolumes {
 
     URL p = conf.getClass().getResource("");
     String path = p.getPath().concat(TestOzoneVolumes.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
 
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
+    conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, true);
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
     cluster = new MiniDFSCluster.Builder(conf).build();

+ 5 - 5
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/TestOzoneWebAccess.java

@@ -59,8 +59,8 @@ public class TestOzoneWebAccess {
   /**
    * Create a MiniDFSCluster for testing.
    *
-   * Ozone is made active by setting DFS_OBJECTSTORE_ENABLED_KEY = true and
-   * DFS_STORAGE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
    * emulate Ozone backend.
    *
    * @throws IOException
@@ -71,9 +71,9 @@ public class TestOzoneWebAccess {
 
     URL p = conf.getClass().getResource("");
     String path = p.getPath().concat(TestOzoneWebAccess.class.getSimpleName());
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
 
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();

+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestBuckets.java

@@ -47,8 +47,8 @@ public class TestBuckets {
   /**
    * Create a MiniDFSCluster for testing.
    * <p>
-   * Ozone is made active by setting DFS_OBJECTSTORE_ENABLED_KEY = true and
-   * DFS_STORAGE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
    * emulate Ozone backend.
    *
    * @throws IOException
@@ -60,12 +60,12 @@ public class TestBuckets {
 
     URL p = conf.getClass().getResource("");
     String path = p.getPath().concat(TestBuckets.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
 
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
 
     cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestKeys.java

@@ -58,8 +58,8 @@ public class TestKeys {
   /**
    * Create a MiniDFSCluster for testing.
    *
-   * Ozone is made active by setting DFS_OBJECTSTORE_ENABLED_KEY = true and
-   * DFS_STORAGE_HANDLER_TYPE_KEY = "local" , which uses a local
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local
    * directory to emulate Ozone backend.
    *
    * @throws IOException
@@ -71,14 +71,14 @@ public class TestKeys {
 
     URL p = conf.getClass().getResource("");
     path = p.getPath().concat(TestKeys.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-                            OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+                            OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
 
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
 
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_KEY, true);
+    conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, true);
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
 

+ 8 - 8
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/web/client/TestVolume.java

@@ -53,8 +53,8 @@ public class TestVolume {
   /**
    * Create a MiniDFSCluster for testing.
    * <p>
-   * Ozone is made active by setting DFS_OBJECTSTORE_ENABLED_KEY = true and
-   * DFS_STORAGE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
+   * Ozone is made active by setting OZONE_ENABLED = true and
+   * OZONE_HANDLER_TYPE_KEY = "local" , which uses a local directory to
    * emulate Ozone backend.
    *
    * @throws IOException
@@ -66,14 +66,14 @@ public class TestVolume {
 
     URL p = conf.getClass().getResource("");
     String path = p.getPath().concat(TestVolume.class.getSimpleName());
-    path += conf.getTrimmed(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT,
-        OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT_DEFAULT);
+    path += conf.getTrimmed(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT,
+        OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT_DEFAULT);
     FileUtils.deleteDirectory(new File(path));
 
-    conf.set(OzoneConfigKeys.DFS_STORAGE_LOCAL_ROOT, path);
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_ENABLED_KEY, true);
-    conf.set(OzoneConfigKeys.DFS_STORAGE_HANDLER_TYPE_KEY, "local");
-    conf.setBoolean(OzoneConfigKeys.DFS_OBJECTSTORE_TRACE_ENABLED_KEY, true);
+    conf.set(OzoneConfigKeys.OZONE_LOCALSTORAGE_ROOT, path);
+    conf.setBoolean(OzoneConfigKeys.OZONE_ENABLED, true);
+    conf.set(OzoneConfigKeys.OZONE_HANDLER_TYPE_KEY, "local");
+    conf.setBoolean(OzoneConfigKeys.OZONE_TRACE_ENABLED_KEY, true);
     Logger.getLogger("log4j.logger.org.apache.http").setLevel(Level.DEBUG);
 
     cluster = new MiniDFSCluster.Builder(conf).build();