瀏覽代碼

HDFS-11680. Ozone: SCM CLI: Implement info container command. Contributed by Yuanbo Liu.

Weiwei Yang 8 年之前
父節點
當前提交
cee6438005

+ 31 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ContainerOperationClient.java

@@ -17,6 +17,8 @@
  */
 package org.apache.hadoop.scm.client;
 
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ReadContainerResponseProto;
 import org.apache.hadoop.scm.XceiverClientSpi;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.apache.hadoop.scm.XceiverClientManager;
@@ -154,6 +156,35 @@ public class ContainerOperationClient implements ScmClient {
     }
   }
 
+  /**
+   * Get meta data from an existing container.
+   *
+   * @param pipeline - pipeline that represents the container.
+   * @return ContainerData - a protobuf message which has basic info
+   * of a container.
+   * @throws IOException
+   */
+  @Override
+  public ContainerData readContainer(Pipeline pipeline) throws IOException {
+    XceiverClientSpi client = null;
+    try {
+      client = xceiverClientManager.acquireClient(pipeline);
+      String traceID = UUID.randomUUID().toString();
+      ReadContainerResponseProto response =
+          ContainerProtocolCalls.readContainer(client,
+              pipeline.getContainerName(), traceID);
+      LOG.info("Read container {}, leader: {}, machines: {} ",
+          pipeline.getContainerName(),
+          pipeline.getLeader(),
+          pipeline.getMachines());
+      return response.getContainerData();
+    } finally {
+      if (client != null) {
+        xceiverClientManager.releaseClient(client);
+      }
+    }
+  }
+
   /**
    * Given an id, return the pipeline associated with the container.
    * @param containerId - String Container ID

+ 11 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/client/ScmClient.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.scm.client;
 
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 
 import java.io.IOException;
@@ -51,13 +52,22 @@ public interface ScmClient {
   Pipeline getContainer(String containerId) throws IOException;
 
   /**
-   * Delets an existing container.
+   * Deletes an existing container.
    * @param pipeline - Pipeline that represents the container.
    * @param force - true to forcibly delete the container.
    * @throws IOException
    */
   void deleteContainer(Pipeline pipeline, boolean force) throws IOException;
 
+  /**
+   * Read meta data from an existing container.
+   * @param pipeline - Pipeline that represents the container.
+   * @return ContainerData - meta data of the container
+   * @throws IOException
+   */
+  ContainerData readContainer(Pipeline pipeline) throws IOException;
+
+
   /**
    * Gets the container size -- Computed by SCM from Container Reports.
    * @param pipeline - Pipeline

+ 31 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/scm/storage/ContainerProtocolCalls.java

@@ -45,6 +45,10 @@ import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Type;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
     .WriteChunkRequestProto;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
+    .ReadContainerResponseProto;
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos
+    .ReadContainerRequestProto;
 import org.apache.hadoop.scm.container.common.helpers.StorageContainerException;
 
 import java.io.IOException;
@@ -272,9 +276,35 @@ public final class ContainerProtocolCalls {
   }
 
   /**
-   * Reads the data given the container name and key.
+   * readContainer call that gets meta data from an existing container.
    *
    * @param client - client
+   * @param traceID - trace ID
+   * @throws IOException
+   */
+  public static ReadContainerResponseProto readContainer(
+      XceiverClientSpi client, String containerName,
+      String traceID) throws IOException {
+    ReadContainerRequestProto.Builder readRequest =
+        ReadContainerRequestProto.newBuilder();
+    readRequest.setName(containerName);
+    readRequest.setPipeline(client.getPipeline().getProtobufMessage());
+
+    ContainerCommandRequestProto.Builder request =
+        ContainerCommandRequestProto.newBuilder();
+    request.setCmdType(Type.ReadContainer);
+    request.setReadContainer(readRequest);
+    request.setTraceID(traceID);
+    ContainerCommandResponseProto response =
+        client.sendCommand(request.build());
+    validateContainerResponse(response);
+    return response.getReadContainer();
+  }
+
+  /**
+   * Reads the data given the container name and key.
+   *
+   * @param client - client
    * @param containerName - name of the container
    * @param key - key
    * @param traceID - trace ID

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerData.java

@@ -102,10 +102,16 @@ public class ContainerData {
       builder.setDbPath(this.getDBPath());
     }
 
+    if (this.getHash() != null) {
+      builder.setHash(this.getHash());
+    }
+
     if (this.getContainerPath() != null) {
       builder.setContainerPath(this.getContainerPath());
     }
 
+    builder.setOpen(this.isOpen());
+
     for (Map.Entry<String, String> entry : metadata.entrySet()) {
       ContainerProtos.KeyValue.Builder keyValBuilder =
           ContainerProtos.KeyValue.newBuilder();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/container/common/helpers/ContainerReport.java

@@ -27,7 +27,7 @@ import org.apache.hadoop.ozone.protocol.proto.StorageContainerDatanodeProtocolPr
  * The protobuf counter part of this class looks like this.
  * message ContainerInfo {
  * required string containerName = 1;
- * repeated bytes finalhash = 2;
+ * required string finalhash = 2;
  * optional int64 size = 3;
  * optional int64 keycount = 4;
  * }

+ 7 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/container/ContainerCommandHandler.java

@@ -33,6 +33,8 @@ import static org.apache.hadoop.ozone.scm.cli.container
     .CreateContainerHandler.CONTAINER_CREATE;
 import static org.apache.hadoop.ozone.scm.cli.container
     .DeleteContainerHandler.CONTAINER_DELETE;
+import static org.apache.hadoop.ozone.scm.cli.container
+    .InfoContainerHandler.CONTAINER_INFO;
 
 /**
  * The handler class of container-specific commands, e.g. createContainer.
@@ -57,6 +59,8 @@ public class ContainerCommandHandler extends OzoneCommandHandler {
       handler = new CreateContainerHandler(getScmClient());
     } else if (cmd.hasOption(CONTAINER_DELETE)) {
       handler = new DeleteContainerHandler(getScmClient());
+    } else if (cmd.hasOption(CONTAINER_INFO)) {
+      handler = new InfoContainerHandler(getScmClient());
     }
 
     // execute the sub command, throw exception if no sub command found
@@ -87,11 +91,14 @@ public class ContainerCommandHandler extends OzoneCommandHandler {
   private static void addCommandsOption(Options options) {
     Option createContainer =
         new Option(CONTAINER_CREATE, false, "Create container");
+    Option infoContainer =
+        new Option(CONTAINER_INFO, true, "Info container");
     Option deleteContainer =
         new Option(CONTAINER_DELETE, true, "Delete container");
 
     options.addOption(createContainer);
     options.addOption(deleteContainer);
+    options.addOption(infoContainer);
     // TODO : add other options such as delete, close etc.
   }
 

+ 91 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/container/InfoContainerHandler.java

@@ -0,0 +1,91 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ozone.scm.cli.container;
+
+import com.google.common.base.Preconditions;
+import org.apache.commons.cli.CommandLine;
+import org.apache.commons.cli.HelpFormatter;
+import org.apache.commons.cli.Options;
+
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.ozone.scm.cli.OzoneCommandHandler;
+import org.apache.hadoop.scm.client.ScmClient;
+import org.apache.hadoop.scm.container.common.helpers.Pipeline;
+
+import java.io.IOException;
+import java.util.stream.Collectors;
+
+/**
+ * This is the handler that processes the container info command.
+ */
+public class InfoContainerHandler extends OzoneCommandHandler {
+
+  public static final String CONTAINER_INFO = "info";
+
+  /**
+   * Constructs a handler object.
+   *
+   * @param scmClient scm client.
+   */
+  public InfoContainerHandler(ScmClient scmClient) {
+    super(scmClient);
+  }
+
+  @Override
+  public void execute(CommandLine cmd) throws IOException {
+    if (!cmd.hasOption(CONTAINER_INFO)) {
+      throw new IOException("Expecting container info");
+    }
+    String containerName = cmd.getOptionValue(CONTAINER_INFO);
+    Pipeline pipeline = getScmClient().getContainer(containerName);
+    Preconditions.checkNotNull(pipeline, "Pipeline cannot be null");
+
+    ContainerData containerData =
+        getScmClient().readContainer(pipeline);
+
+    // Print container report info.
+    logOut("Container Name: %s",
+        containerData.getName());
+    String openStatus = containerData.getOpen() ? "OPEN" : "CLOSED";
+    logOut("Container State: %s", openStatus);
+    if (!containerData.getHash().isEmpty()) {
+      logOut("Container Hash: %s", containerData.getHash());
+    }
+    logOut("Container DB Path: %s", containerData.getDbPath());
+    logOut("Container Path: %s", containerData.getContainerPath());
+
+    // Output meta data.
+    String metadataStr = containerData.getMetadataList().stream().map(
+        p -> p.getKey() + ":" + p.getValue()).collect(Collectors.joining(", "));
+    logOut("Container Metadata: {%s}", metadataStr);
+
+    // Print pipeline of an existing container.
+    logOut("LeaderID: %s", pipeline.getLeader().getHostName());
+    String machinesStr = pipeline.getMachines().stream().map(
+        DatanodeID::getHostName).collect(Collectors.joining(","));
+    logOut("Datanodes: [%s]", machinesStr);
+  }
+
+  @Override
+  public void displayHelp() {
+    HelpFormatter helpFormatter = new HelpFormatter();
+    helpFormatter.printHelp("hdfs scm -container -info <container name>",
+        new Options());
+  }
+}

+ 16 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/cblock/util/MockStorageClient.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.cblock.util;
 
+import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.ContainerData;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
@@ -60,6 +61,21 @@ public class MockStorageClient implements ScmClient {
 
   }
 
+  /**
+   * Creates an instance of ContainerData for a given container id.
+   * Since this is a testing class, there is no need to set up the whole
+   * environment to get the meta data of the container.
+   * @param pipeline - pipeline that represents the container
+   * @return ContainerData - mock meta data containing only the name
+   * @throws IOException
+   */
+  @Override
+  public ContainerData readContainer(Pipeline pipeline) throws IOException {
+    return ContainerData.newBuilder()
+        .setName(pipeline.getContainerName())
+        .build();
+  }
+
   /**
    * Return reference to an *existing* container with given ID.
    *

+ 123 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestSCMCli.java

@@ -17,6 +17,7 @@
  */
 package org.apache.hadoop.ozone.scm;
 
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfiguration;
@@ -30,14 +31,19 @@ import org.apache.hadoop.scm.client.ContainerOperationClient;
 import org.apache.hadoop.scm.client.ScmClient;
 import org.apache.hadoop.scm.container.common.helpers.Pipeline;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
+import org.apache.hadoop.util.StringUtils;
 import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.Timeout;
 
 import java.io.ByteArrayOutputStream;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.util.List;
+import java.util.stream.Collectors;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertNotNull;
@@ -63,6 +69,9 @@ public class TestSCMCli {
   private static ByteArrayOutputStream errContent;
   private static PrintStream errStream;
 
+  @Rule
+  public Timeout globalTimeout = new Timeout(30000);
+
   @BeforeClass
   public static void setup() throws Exception {
     conf = new OzoneConfiguration();
@@ -214,6 +223,116 @@ public class TestSCMCli {
         .contains("Specified key does not exist."));
   }
 
+  @Test
+  public void testInfoContainer() throws Exception {
+    // The cluster has one Datanode server.
+    DatanodeID datanodeID = cluster.getDataNodes().get(0).getDatanodeId();
+    String formatStr =
+        "Container Name: %s\n" +
+        "Container State: %s\n" +
+        "Container DB Path: %s\n" +
+        "Container Path: %s\n" +
+        "Container Metadata: {%s}\n" +
+        "LeaderID: %s\n" +
+        "Datanodes: [%s]\n";
+
+    String formatStrWithHash =
+        "Container Name: %s\n" +
+        "Container State: %s\n" +
+        "Container Hash: %s\n" +
+        "Container DB Path: %s\n" +
+        "Container Path: %s\n" +
+        "Container Metadata: {%s}\n" +
+        "LeaderID: %s\n" +
+        "Datanodes: [%s]\n";
+
+    // Test a non-exist container
+    String cname = "nonExistContainer";
+    String[] info = {"-container", "-info", cname};
+    int exitCode = runCommandAndGetOutput(info, null, null);
+    assertEquals(ResultCode.EXECUTION_ERROR, exitCode);
+
+    // Create an empty container.
+    cname = "ContainerTestInfo1";
+    Pipeline pipeline = scm.allocateContainer(cname);
+    ContainerData data = new ContainerData(cname);
+    containerManager.createContainer(pipeline, data);
+
+    info = new String[]{"-container", "-info", cname};
+    ByteArrayOutputStream out = new ByteArrayOutputStream();
+    exitCode = runCommandAndGetOutput(info, out, null);
+    assertEquals(ResultCode.SUCCESS, exitCode);
+
+    String openStatus = data.isOpen() ? "OPEN" : "CLOSED";
+    String expected = String.format(formatStr, cname, openStatus,
+        data.getDBPath(), data.getContainerPath(), "",
+        datanodeID.getHostName(), datanodeID.getHostName());
+    assertEquals(expected, out.toString());
+
+    out.reset();
+
+    // Create an non-empty container
+    cname = "ContainerTestInfo2";
+    pipeline = scm.allocateContainer(cname);
+    data = new ContainerData(cname);
+    containerManager.createContainer(pipeline, data);
+    KeyUtils.getDB(data, conf).put(cname.getBytes(),
+        "someKey".getBytes());
+
+    info = new String[]{"-container", "-info", cname};
+    exitCode = runCommandAndGetOutput(info, out, null);
+    assertEquals(ResultCode.SUCCESS, exitCode);
+
+    openStatus = data.isOpen() ? "OPEN" : "CLOSED";
+    expected = String.format(formatStr, cname, openStatus,
+        data.getDBPath(), data.getContainerPath(), "",
+        datanodeID.getHostName(), datanodeID.getHostName());
+    assertEquals(expected, out.toString());
+
+    out.reset();
+
+    // Create a container with some meta data.
+    cname = "ContainerTestInfo3";
+    pipeline = scm.allocateContainer(cname);
+    data = new ContainerData(cname);
+    data.addMetadata("VOLUME", "shire");
+    data.addMetadata("owner", "bilbo");
+    containerManager.createContainer(pipeline, data);
+    KeyUtils.getDB(data, conf).put(cname.getBytes(),
+        "someKey".getBytes());
+
+    List<String> metaList = data.getAllMetadata().entrySet().stream()
+        .map(entry -> entry.getKey() + ":" + entry.getValue())
+        .collect(Collectors.toList());
+    String metadataStr = StringUtils.join(", ", metaList);
+
+    info = new String[]{"-container", "-info", cname};
+    exitCode = runCommandAndGetOutput(info, out, null);
+    assertEquals(ResultCode.SUCCESS, exitCode);
+
+    openStatus = data.isOpen() ? "OPEN" : "CLOSED";
+    expected = String.format(formatStr, cname, openStatus,
+        data.getDBPath(), data.getContainerPath(), metadataStr,
+        datanodeID.getHostName(), datanodeID.getHostName());
+    assertEquals(expected, out.toString());
+
+    out.reset();
+
+    // Close last container and test info again.
+    containerManager.closeContainer(cname);
+
+    info = new String[]{"-container", "-info", cname};
+    exitCode = runCommandAndGetOutput(info, out, null);
+    assertEquals(ResultCode.SUCCESS, exitCode);
+    data = containerManager.readContainer(cname);
+
+    openStatus = data.isOpen() ? "OPEN" : "CLOSED";
+    expected = String.format(formatStrWithHash, cname, openStatus,
+        data.getHash(), data.getDBPath(), data.getContainerPath(),
+        metadataStr, datanodeID.getHostName(), datanodeID.getHostName());
+    assertEquals(expected, out.toString());
+  }
+
   @Test
   public void testNonExistCommand() throws Exception {
     PrintStream init = System.out;
@@ -255,8 +374,10 @@ public class TestSCMCli {
     String expected1 =
         "usage: hdfs scm -container <commands> <options>\n" +
         "where <commands> can be one of the following\n" +
-        " -create      Create container\n" +
-        " -del <arg>   Delete container\n";
+        " -create       Create container\n" +
+        " -del <arg>    Delete container\n" +
+        " -info <arg>   Info container\n";
+
     assertEquals(expected1, testContent.toString());
     testContent.reset();