
HDFS-11103. Ozone: Cleanup some dependencies. Contributed by Anu Engineer.

Anu Engineer, 8 years ago
commit 7b761f18de

+ 23 - 45
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerLocationManagerImpl.java

@@ -19,26 +19,21 @@
 package org.apache.hadoop.ozone.container.common.impl;
 
 import com.google.common.base.Preconditions;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.OzoneConsts;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager;
-
-
+import org.apache.hadoop.ozone.container.common.interfaces
+    .ContainerLocationManager;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
 import java.nio.file.Path;
-import java.nio.file.Paths;
-import java.util.LinkedList;
 import java.util.List;
 
 /**
  * A class that tells the ContainerManager where to place the containers.
  * Please note : There is *no* one-to-one correlation between metadata
 * locations and data locations.
  *
  *  For example : A user could map all container files to a
  *  SSD but leave data/metadata on bunch of other disks.
@@ -47,46 +42,27 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
   private static final Logger LOG =
       LoggerFactory.getLogger(ContainerLocationManagerImpl.class);
 
-
-  private final Configuration conf;
-  private final FsDatasetSpi<? extends FsVolumeSpi> dataset;
-  private final Path[] volumePaths;
+  private final List<StorageLocation> dataLocations;
   private int currentIndex;
-  private final List<Path> locations;
-
+  private final List<StorageLocation> metadataLocations;
 
   /**
    * Constructs a Location Manager.
-   * @param conf - Configuration.
+   * @param metadataLocations - locations where we store
+   * the container metadata.
+   * @param dataDirs - locations where we store the actual
+   * data or chunk files.
+   * @throws IOException
    */
-  public ContainerLocationManagerImpl(
-      Configuration conf, List<Path> locations,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset) throws IOException {
-    this.conf = conf;
-    this.dataset = dataset;
-    List<Path> pathList = new LinkedList<>();
-    FsDatasetSpi.FsVolumeReferences references;
-    try {
-      synchronized (this.dataset) {
-        references = this.dataset.getFsVolumeReferences();
-        for (int ndx = 0; ndx < references.size(); ndx++) {
-          FsVolumeSpi vol = references.get(ndx);
-          pathList.add(Paths.get(vol.getBaseURI().getPath()));
-        }
-        references.close();
-        volumePaths = pathList.toArray(new Path[pathList.size()]);
-        this.locations = locations;
-      }
-    } catch (IOException ex) {
-      LOG.error("Unable to get volume paths.", ex);
-      throw new IOException("Internal error", ex);
-    }
-
+  public ContainerLocationManagerImpl(List<StorageLocation> metadataLocations,
+      List<StorageLocation> dataDirs)
+      throws IOException {
+    dataLocations = dataDirs;
+    this.metadataLocations = metadataLocations;
   }
-
   /**
    * Returns the path where the container should be placed from a set of
-   * locations.
+   * metadata locations.
    *
    * @return A path where we should place this container and metadata.
    * @throws IOException
@@ -94,9 +70,10 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
   @Override
   public Path getContainerPath()
       throws IOException {
-    Preconditions.checkState(locations.size() > 0);
-    int index = currentIndex % locations.size();
-    return locations.get(index).resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
+    Preconditions.checkState(metadataLocations.size() > 0);
+    int index = currentIndex % metadataLocations.size();
+    Path path = metadataLocations.get(index).getFile().toPath();
+    return path.resolve(OzoneConsts.CONTAINER_ROOT_PREFIX);
   }
 
   /**
@@ -107,7 +84,8 @@ public class ContainerLocationManagerImpl implements ContainerLocationManager {
    */
   @Override
   public Path getDataPath(String containerName) throws IOException {
-    Path currentPath = volumePaths[currentIndex++ % volumePaths.length];
+    Path currentPath = dataLocations.get(currentIndex++ % dataLocations.size())
+        .getFile().toPath();
     currentPath = currentPath.resolve(OzoneConsts.CONTAINER_PREFIX);
     return currentPath.resolve(containerName);
   }
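
Taken together, the two accessors now rotate over plain lists instead of walking FsDatasetSpi volume references. Below is a minimal standalone sketch of that rotation, with java.nio.file.Path standing in for StorageLocation; the class name, directory names, and "containers" prefix are illustrative, not part of the patch:

import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.Arrays;
import java.util.List;

// Standalone sketch of the round-robin selection that
// ContainerLocationManagerImpl.getDataPath performs after this change.
public class RoundRobinLocationPicker {
  private final List<Path> locations;
  private int currentIndex;

  RoundRobinLocationPicker(List<Path> locations) {
    this.locations = locations;
  }

  // Mirrors getDataPath: bump the index on every call so successive
  // containers spread evenly across the configured locations.
  Path next(String containerName) {
    Path base = locations.get(currentIndex++ % locations.size());
    return base.resolve("containers").resolve(containerName);
  }

  public static void main(String[] args) {
    RoundRobinLocationPicker picker = new RoundRobinLocationPicker(
        Arrays.asList(Paths.get("/data1"), Paths.get("/data2")));
    System.out.println(picker.next("c1")); // /data1/containers/c1
    System.out.println(picker.next("c2")); // /data2/containers/c2
    System.out.println(picker.next("c3")); // /data1/containers/c3
  }
}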

+ 21 - 14
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/impl/ContainerManagerImpl.java

@@ -24,17 +24,17 @@ import org.apache.commons.codec.digest.DigestUtils;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.interfaces.ChunkManager;
-import org.apache.hadoop.ozone.container.common.interfaces.ContainerLocationManager;
+import org.apache.hadoop.ozone.container.common.interfaces
+    .ContainerLocationManager;
 import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;
 import org.apache.hadoop.ozone.container.common.interfaces.KeyManager;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -49,11 +49,13 @@ import java.security.DigestInputStream;
 import java.security.DigestOutputStream;
 import java.security.MessageDigest;
 import java.security.NoSuchAlgorithmException;
+import java.util.LinkedList;
 import java.util.List;
 import java.util.concurrent.ConcurrentNavigableMap;
 import java.util.concurrent.ConcurrentSkipListMap;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_EXTENSION;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_META;
 
@@ -85,17 +87,16 @@ public class ContainerManagerImpl implements ContainerManager {
    */
   @Override
   public void init(
-      Configuration config, List<Path> containerDirs,
-      FsDatasetSpi<? extends FsVolumeSpi> dataset) throws IOException {
-
+      Configuration config, List<StorageLocation> containerDirs)
+      throws IOException {
     Preconditions.checkNotNull(config);
     Preconditions.checkNotNull(containerDirs);
     Preconditions.checkState(containerDirs.size() > 0);
 
     readLock();
     try {
-      for (Path path : containerDirs) {
-        File directory = path.toFile();
+      for (StorageLocation path : containerDirs) {
+        File directory = path.getFile();
         if (!directory.isDirectory()) {
           LOG.error("Invalid path to container metadata directory. path: {}",
               path.toString());
@@ -112,8 +113,14 @@ public class ContainerManagerImpl implements ContainerManager {
           }
         }
       }
-      this.locationManager = new ContainerLocationManagerImpl(config,
-          containerDirs, dataset);
+
+      List<StorageLocation> dataDirs = new LinkedList<>();
+      for (String dir : config.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
+        StorageLocation location = StorageLocation.parse(dir);
+        dataDirs.add(location);
+      }
+      this.locationManager =
+          new ContainerLocationManagerImpl(containerDirs, dataDirs);
 
     } finally {
       readUnlock();
@@ -286,8 +293,8 @@ public class ContainerManagerImpl implements ContainerManager {
       // In case of ozone this is *not* a deal breaker since
       // SCM is guaranteed to generate unique container names.
 
-      LOG.error("creation of container failed. Name: {} "
-          , containerData.getContainerName());
+      LOG.error("creation of container failed. Name: {} ",
+          containerData.getContainerName());
       throw ex;
     } finally {
       IOUtils.closeStream(dos);
@@ -528,7 +535,7 @@ public class ContainerManagerImpl implements ContainerManager {
      * @param containerData - ContainerData.
      * @param active        - Active or not active.
      */
-    public ContainerStatus(ContainerData containerData, boolean active) {
+    ContainerStatus(ContainerData containerData, boolean active) {
       this.containerData = containerData;
       this.active = active;
     }
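
The key behavioral change in init() is where data directories come from: they are now parsed out of dfs.datanode.data.dir rather than taken from a live FsDatasetSpi. A hedged sketch of just that parsing step, assuming the pre-3.0 StorageLocation API the patch itself uses (parse/getFile); the example paths are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;

import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;

// Sketch of the new init() behavior: data dirs are read straight from
// configuration instead of being pulled off FsDatasetSpi volume
// references.
public class DataDirsFromConf {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Entries may carry a storage-type prefix, e.g. "[SSD]/mnt/ssd0".
    conf.setStrings(DFS_DATANODE_DATA_DIR_KEY, "/data/dn1", "[SSD]/mnt/ssd0");

    List<StorageLocation> dataDirs = new LinkedList<>();
    for (String dir : conf.getStrings(DFS_DATANODE_DATA_DIR_KEY)) {
      dataDirs.add(StorageLocation.parse(dir));
    }
    for (StorageLocation location : dataDirs) {
      System.out.println(location.getStorageType() + " -> "
          + location.getFile());
    }
  }
}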

+ 3 - 6
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/interfaces/ContainerManager.java

@@ -1,4 +1,4 @@
-/*
+/**
  * Licensed to the Apache Software Foundation (ASF) under one
  * or more contributor license agreements.  See the NOTICE file
  * distributed with this work for additional information
@@ -21,14 +21,12 @@ package org.apache.hadoop.ozone.container.common.interfaces;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.util.RwLock;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.IOException;
-import java.nio.file.Path;
 import java.util.List;
 
 /**
@@ -45,8 +43,7 @@ public interface ContainerManager extends RwLock {
    * @param containerDirs - List of Metadata Container locations.
    * @throws IOException
    */
-  void init(Configuration config, List<Path> containerDirs,
-            FsDatasetSpi<? extends FsVolumeSpi> dataset)
+  void init(Configuration config, List<StorageLocation> containerDirs)
       throws IOException;
 
   /**
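
For callers, the interface change means no dataset handle has to be threaded through any more. A call-site sketch under the new contract; the start() helper is illustrative, and it assumes metadataDir already exists and that dfs.datanode.data.dir is set in conf (since init() now parses it for the data dirs):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.container.common.impl.ContainerManagerImpl;
import org.apache.hadoop.ozone.container.common.interfaces.ContainerManager;

import java.io.IOException;
import java.util.Collections;
import java.util.List;

// Wiring up a ContainerManager from configuration alone, with no
// FsDatasetSpi argument.
public class InitContainerManager {
  static ContainerManager start(Configuration conf, String metadataDir)
      throws IOException {
    List<StorageLocation> containerDirs =
        Collections.singletonList(StorageLocation.parse(metadataDir));
    ContainerManager manager = new ContainerManagerImpl();
    manager.init(conf, containerDirs); // was: init(conf, dirs, dataset)
    return manager;
  }
}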

+ 6 - 9
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/ozoneimpl/OzoneContainer.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.container.ozoneimpl;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
@@ -34,10 +35,8 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import java.io.IOException;
-import java.nio.file.Paths;
 import java.util.LinkedList;
 import java.util.List;
-import java.nio.file.Path;
 
 /**
  * Ozone main class sets up the network server and initializes the container
@@ -48,7 +47,6 @@ public class OzoneContainer {
       LoggerFactory.getLogger(OzoneContainer.class);
 
   private final Configuration ozoneConfig;
-  private final FsDatasetSpi<? extends FsVolumeSpi> dataSet;
   private final ContainerDispatcher dispatcher;
   private final ContainerManager manager;
   private final XceiverServer server;
@@ -65,22 +63,21 @@ public class OzoneContainer {
   public OzoneContainer(
       Configuration ozoneConfig,
       FsDatasetSpi<? extends FsVolumeSpi> dataSet) throws Exception {
-    List<Path> locations = new LinkedList<>();
+    List<StorageLocation> locations = new LinkedList<>();
     String[] paths = ozoneConfig.getStrings(OzoneConfigKeys
         .OZONE_METADATA_DIRS);
     if (paths != null && paths.length > 0) {
       for (String p : paths) {
-        locations.add(Paths.get(p));
+        locations.add(StorageLocation.parse(p));
       }
     } else {
       getDataDir(dataSet, locations);
     }
 
     this.ozoneConfig = ozoneConfig;
-    this.dataSet = dataSet;
 
     manager = new ContainerManagerImpl();
-    manager.init(this.ozoneConfig, locations, this.dataSet);
+    manager.init(this.ozoneConfig, locations);
     this.chunkManager = new ChunkManagerImpl(manager);
     manager.setChunkManager(this.chunkManager);
 
@@ -153,14 +150,14 @@ public class OzoneContainer {
    */
   private void getDataDir(
       FsDatasetSpi<? extends FsVolumeSpi> dataset,
-      List<Path> pathList) throws IOException {
+      List<StorageLocation> pathList) throws IOException {
     FsDatasetSpi.FsVolumeReferences references;
     try {
       synchronized (dataset) {
         references = dataset.getFsVolumeReferences();
         for (int ndx = 0; ndx < references.size(); ndx++) {
           FsVolumeSpi vol = references.get(ndx);
-          pathList.add(Paths.get(vol.getBaseURI().getPath()));
+          pathList.add(StorageLocation.parse(vol.getBaseURI().getPath()));
         }
         references.close();
       }
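
The constructor's resolution order for metadata locations is unchanged in spirit: prefer ozone.metadata.dirs when set, and fall back to datanode volumes otherwise. A sketch of just the config-driven branch; the fallback is only noted in a comment because it needs a running FsDatasetSpi, and the class and method names here are illustrative:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
import org.apache.hadoop.ozone.OzoneConfigKeys;

import java.io.IOException;
import java.util.LinkedList;
import java.util.List;

// Resolution order used by the revised OzoneContainer constructor:
// ozone.metadata.dirs if present; otherwise getDataDir() walks live
// FsVolumeSpi base URIs (omitted here).
public class MetadataDirResolution {
  static List<StorageLocation> resolve(Configuration conf)
      throws IOException {
    List<StorageLocation> locations = new LinkedList<>();
    String[] paths = conf.getStrings(OzoneConfigKeys.OZONE_METADATA_DIRS);
    if (paths != null && paths.length > 0) {
      for (String p : paths) {
        locations.add(StorageLocation.parse(p));
      }
    }
    return locations;
  }
}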

+ 31 - 37
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestContainerPersistence.java

@@ -1,19 +1,18 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- *  with the License.  You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- *  Unless required by applicable law or agreed to in writing, software
- *  distributed under the License is distributed on an "AS IS" BASIS,
- *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- *  See the License for the specific language governing permissions and
- *  limitations under the License.
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
  */
 
 package org.apache.hadoop.ozone.container.common.impl;
@@ -21,8 +20,7 @@ package org.apache.hadoop.ozone.container.common.impl;
 import org.apache.commons.codec.binary.Hex;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
-import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsVolumeSpi;
+import org.apache.hadoop.hdfs.server.datanode.StorageLocation;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
@@ -31,9 +29,9 @@ import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.helpers.KeyData;
-import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.junit.After;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -64,7 +62,6 @@ import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .setDataChecksum;
-import static org.junit.Assert.assertArrayEquals;
 import static org.junit.Assert.fail;
 
 /**
@@ -85,9 +82,8 @@ public class TestContainerPersistence {
   private static ChunkManagerImpl chunkManager;
   private static KeyManagerImpl keyManager;
   private static OzoneConfiguration conf;
-  private static FsDatasetSpi<? extends FsVolumeSpi> fsDataSet;
   private static MiniOzoneCluster cluster;
-  private static List<Path> pathLists = new LinkedList<>();
+  private static List<StorageLocation> pathLists = new LinkedList<>();
 
   @BeforeClass
   public static void init() throws Throwable {
@@ -103,12 +99,10 @@ public class TestContainerPersistence {
     if (containerDir.exists()) {
       FileUtils.deleteDirectory(new File(path));
     }
-
     Assert.assertTrue(containerDir.mkdirs());
 
     cluster = new MiniOzoneCluster.Builder(conf)
         .setHandlerType("local").build();
-    fsDataSet = cluster.getDataNodes().get(0).getFSDataset();
     containerManager = new ContainerManagerImpl();
     chunkManager = new ChunkManagerImpl(containerManager);
     containerManager.setChunkManager(chunkManager);
@@ -130,8 +124,8 @@ public class TestContainerPersistence {
     }
     pathLists.clear();
     containerManager.getContainerMap().clear();
-    pathLists.add(Paths.get(path));
-    containerManager.init(conf, pathLists, fsDataSet);
+    pathLists.add(StorageLocation.parse(path.toString()));
+    containerManager.init(conf, pathLists);
   }
 
   @After
@@ -190,8 +184,8 @@ public class TestContainerPersistence {
     containerManager.createContainer(createSingleNodePipeline(containerName),
         data);
     try {
-      containerManager.createContainer(createSingleNodePipeline
-          (containerName), data);
+      containerManager.createContainer(createSingleNodePipeline(
+          containerName), data);
       fail("Expected Exception not thrown.");
     } catch (IOException ex) {
       Assert.assertNotNull(ex);
@@ -207,14 +201,14 @@ public class TestContainerPersistence {
     ContainerData data = new ContainerData(containerName1);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(containerName1)
-        , data);
+    containerManager.createContainer(createSingleNodePipeline(containerName1),
+        data);
 
     data = new ContainerData(containerName2);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(containerName2)
-        , data);
+    containerManager.createContainer(createSingleNodePipeline(containerName2),
+        data);
 
 
     Assert.assertTrue(containerManager.getContainerMap()
@@ -233,8 +227,8 @@ public class TestContainerPersistence {
     data = new ContainerData(containerName1);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(containerName1)
-        , data);
+    containerManager.createContainer(createSingleNodePipeline(containerName1),
+        data);
 
     // Assert we still have both containers.
     Assert.assertTrue(containerManager.getContainerMap()
@@ -262,8 +256,8 @@ public class TestContainerPersistence {
       ContainerData data = new ContainerData(containerName);
       data.addMetadata("VOLUME", "shire");
       data.addMetadata("owner)", "bilbo");
-      containerManager.createContainer(createSingleNodePipeline
-          (containerName), data);
+      containerManager.createContainer(createSingleNodePipeline(containerName),
+          data);
       testMap.put(containerName, data);
     }
 
@@ -289,7 +283,7 @@ public class TestContainerPersistence {
   }
 
   private ChunkInfo writeChunkHelper(String containerName, String keyName,
-                                     Pipeline pipeline) throws IOException,
+      Pipeline pipeline) throws IOException,
       NoSuchAlgorithmException {
     final int datalen = 1024;
     pipeline.setContainerName(containerName);