
HDFS-11756. Ozone : add DEBUG CLI support of blockDB file. Contributed by Chen Liang

Chen Liang 8 years ago
parent
commit
6516706eb2

+ 51 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/scm/cli/SQLCLI.java

@@ -24,6 +24,7 @@ import org.apache.commons.cli.OptionBuilder;
 import org.apache.commons.cli.Options;
 import org.apache.commons.cli.ParseException;
 import org.apache.hadoop.conf.Configured;
+import org.apache.hadoop.hdfs.DFSUtilClient;
 import org.apache.hadoop.hdfs.ozone.protocol.proto.ContainerProtos.Pipeline;
 import org.apache.hadoop.hdfs.protocol.proto.HdfsProtos;
 import org.apache.hadoop.util.Tool;
@@ -47,6 +48,7 @@ import java.util.HashSet;
 import java.util.Map;
 import java.util.Set;
 
+import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
 
 /**
@@ -61,7 +63,7 @@ public class SQLCLI  extends Configured implements Tool {
   // for container.db
   private static final String CREATE_CONTAINER_INFO =
       "CREATE TABLE containerInfo (" +
-          "containerName TEXT PRIMARY KEY NOT NULL , " +
+          "containerName TEXT PRIMARY KEY NOT NULL, " +
           "leaderUUID TEXT NOT NULL)";
   private static final String CREATE_CONTAINER_MACHINE =
       "CREATE TABLE containerMembers (" +
@@ -88,6 +90,14 @@ public class SQLCLI  extends Configured implements Tool {
   private static final String INSERT_CONTAINER_MEMBERS =
       "INSERT INTO containerMembers (containerName, datanodeUUID) " +
           "VALUES (\"%s\", \"%s\")";
+  // for block.db
+  private static final String CREATE_BLOCK_CONTAINER =
+      "CREATE TABLE blockContainer (" +
+          "blockKey TEXT PRIMARY KEY NOT NULL, " +
+          "containerName TEXT NOT NULL)";
+  private static final String INSERT_BLOCK_CONTAINER =
+      "INSERT INTO blockContainer (blockKey, containerName) " +
+          "VALUES (\"%s\", \"%s\")";
 
 
   private static final Logger LOG =
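
For reference, each LevelDB entry becomes one INSERT statement via String.format. With a hypothetical block key and container name, the expansion of INSERT_BLOCK_CONTAINER looks like this (a sketch, not part of the patch):

    // Hypothetical values, for illustration only.
    String sql = String.format(INSERT_BLOCK_CONTAINER, "block-0001", "container-7");
    // sql now holds:
    // INSERT INTO blockContainer (blockKey, containerName) VALUES ("block-0001", "container-7")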
@@ -153,6 +163,9 @@ public class SQLCLI  extends Configured implements Tool {
     if (dbName.toString().equals(CONTAINER_DB)) {
       LOG.info("Converting container DB");
       convertContainerDB(dbPath, outPath);
+    } else if (dbName.toString().equals(BLOCK_DB)) {
+      LOG.info("Converting block DB");
+      convertBlockDB(dbPath, outPath);
     } else {
       LOG.error("Unrecognized db name {}", dbName);
     }
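
With this dispatch in place, converting a block DB means pointing the CLI at a path ending in block.db, mirroring what the new test does below (the paths here are hypothetical):

    // A minimal sketch; the metadata and output paths are illustrative.
    String[] args = {"-p", "/path/to/scm/metadata/block.db", "-o", "/tmp/out_sql.db"};
    new SQLCLI().run(args);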
@@ -201,6 +214,7 @@ public class SQLCLI  extends Configured implements Tool {
    * --------------------------------
    *
    * @param dbPath path to container db.
+   * @param outPath path to output sqlite
    * @throws IOException throws exception.
    */
   private void convertContainerDB(Path dbPath, Path outPath)
@@ -269,6 +283,42 @@ public class SQLCLI  extends Configured implements Tool {
     LOG.info("Insertion completed.");
   }
 
+  /**
+   * Converts block.db to sqlite. This is a rather simple db; the schema has
+   * only one table:
+   *
+   * blockContainer
+   * --------------------------
+   * blockKey*  | containerName
+   * --------------------------
+   *
+   * @param dbPath path to block db.
+   * @param outPath path to output sqlite
+   * @throws IOException throws exception.
+   */
+  private void convertBlockDB(Path dbPath, Path outPath) throws Exception {
+    LOG.info("Create tables for sql block db.");
+    File dbFile = dbPath.toFile();
+    org.iq80.leveldb.Options dbOptions = new org.iq80.leveldb.Options();
+    LevelDBStore dbStore = new LevelDBStore(dbFile, dbOptions);
+
+    Connection conn = connectDB(outPath.toString());
+    executeSQL(conn, CREATE_BLOCK_CONTAINER);
+
+    DBIterator iter = dbStore.getIterator();
+    iter.seekToFirst();
+    while (iter.hasNext()) {
+      Map.Entry<byte[], byte[]> entry = iter.next();
+      String blockKey = DFSUtilClient.bytes2String(entry.getKey());
+      String containerName = DFSUtilClient.bytes2String(entry.getValue());
+      String insertBlockContainer = String.format(
+          INSERT_BLOCK_CONTAINER, blockKey, containerName);
+      executeSQL(conn, insertBlockContainer);
+    }
+    closeDB(conn);
+    dbStore.close();
+  }
+
   private CommandLine parseArgs(String[] argv)
       throws ParseException {
     return parser.parse(options, argv);
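
Once converted, the output is an ordinary SQLite file, so it can be inspected with any SQLite client. A minimal standalone sketch using JDBC, assuming the Xerial sqlite-jdbc driver is on the classpath (the class name and file path are hypothetical):

    import java.sql.Connection;
    import java.sql.DriverManager;
    import java.sql.ResultSet;
    import java.sql.Statement;

    public class BlockDbInspector {
      public static void main(String[] args) throws Exception {
        // Open the SQLite file that convertBlockDB wrote.
        try (Connection conn =
                 DriverManager.getConnection("jdbc:sqlite:/tmp/out_sql.db");
             Statement stmt = conn.createStatement();
             // blockContainer maps each blockKey (the primary key) to the
             // name of the container holding that block.
             ResultSet rs = stmt.executeQuery("SELECT * FROM blockContainer")) {
          while (rs.next()) {
            System.out.println(rs.getString("blockKey") + " -> "
                + rs.getString("containerName"));
          }
        }
      }
    }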

+ 81 - 10
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/ozone/scm/TestContainerSQLCli.java

@@ -22,7 +22,12 @@ import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.scm.block.BlockManagerImpl;
 import org.apache.hadoop.ozone.scm.cli.SQLCLI;
+import org.apache.hadoop.ozone.scm.container.ContainerMapping;
+import org.apache.hadoop.ozone.scm.node.NodeManager;
+import org.apache.hadoop.scm.ScmConfigKeys;
+import org.apache.hadoop.scm.container.common.helpers.AllocatedBlock;
 import org.apache.hadoop.scm.protocolPB.StorageContainerLocationProtocolClientSideTranslatorPB;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
@@ -36,8 +41,11 @@ import java.sql.ResultSet;
 import java.sql.SQLException;
 import java.sql.Statement;
 import java.util.ArrayList;
+import java.util.HashMap;
 
+import static org.apache.hadoop.ozone.OzoneConsts.BLOCK_DB;
 import static org.apache.hadoop.ozone.OzoneConsts.CONTAINER_DB;
+import static org.apache.hadoop.ozone.OzoneConsts.KB;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
@@ -52,24 +60,65 @@ public class TestContainerSQLCli {
   private static StorageContainerLocationProtocolClientSideTranslatorPB
       storageContainerLocationClient;
 
+  private static ContainerMapping mapping;
+  private static NodeManager nodeManager;
+  private static BlockManagerImpl blockManager;
+
+  private static String pipelineName1;
+  private static String pipelineName2;
+
+  private static HashMap<String, String> blockContainerMap;
+
+  private final static long DEFAULT_BLOCK_SIZE = 4 * KB;
+
   @BeforeClass
   public static void init() throws Exception {
     long datanodeCapacities = 3 * OzoneConsts.TB;
+    blockContainerMap = new HashMap<>();
+
     conf = new OzoneConfiguration();
+    conf.setInt(ScmConfigKeys.OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, 2);
     cluster = new MiniOzoneCluster.Builder(conf).numDataNodes(1)
         .storageCapacities(new long[] {datanodeCapacities, datanodeCapacities})
         .setHandlerType("distributed").build();
     storageContainerLocationClient =
         cluster.createStorageContainerLocationClient();
     cluster.waitForHeartbeatProcessed();
+    cluster.shutdown();
 
-    // create two containers to be retrieved later.
-    storageContainerLocationClient.allocateContainer(
-        "container0");
-    storageContainerLocationClient.allocateContainer(
-        "container1");
+    nodeManager = cluster.getStorageContainerManager().getScmNodeManager();
+    mapping = new ContainerMapping(conf, nodeManager, 128);
+    blockManager = new BlockManagerImpl(conf, nodeManager, mapping, 128);
+
+    // blockManager.allocateBlock() will create containers if none are
+    // stored in levelDB. The number of containers to create is the value of
+    // OZONE_SCM_CONTAINER_PROVISION_BATCH_SIZE, which we set to 2, so the
+    // first allocateBlock() call creates two containers and assigns the
+    // block to a random one of them.
+    AllocatedBlock ab1 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
+    pipelineName1 = ab1.getPipeline().getContainerName();
+    blockContainerMap.put(ab1.getKey(), pipelineName1);
+
+    AllocatedBlock ab2;
+    // We want the two blocks to land on the two provisioned containers, but
+    // blockManager picks a container at random. Keep retrying until the
+    // second block is assigned to the other container; this seems to be the
+    // only way to cover both containers. Note that each retry creates a
+    // block and assigns it to a container, so the size of blockContainerMap
+    // will vary each time the test is run.
+    while (true) {
+      ab2 = blockManager.allocateBlock(DEFAULT_BLOCK_SIZE);
+      pipelineName2 = ab2.getPipeline().getContainerName();
+      blockContainerMap.put(ab2.getKey(), pipelineName2);
+      if (!pipelineName2.equals(pipelineName1)) {
+        break;
+      }
+    }
+
+    blockManager.close();
+    mapping.close();
+    nodeManager.close();
 
-    cluster.shutdown();
     cli = new SQLCLI();
   }
 
@@ -78,6 +127,28 @@ public class TestContainerSQLCli {
     IOUtils.cleanup(null, storageContainerLocationClient, cluster);
   }
 
+  @Test
+  public void testConvertBlockDB() throws Exception {
+    String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
+    String dbRootPath = conf.get(OzoneConfigKeys.OZONE_CONTAINER_METADATA_DIRS);
+    String dbPath = dbRootPath + "/" + BLOCK_DB;
+    String[] args = {"-p", dbPath, "-o", dbOutPath};
+
+    cli.run(args);
+
+    Connection conn = connectDB(dbOutPath);
+    String sql = "SELECT * FROM blockContainer";
+    ResultSet rs = executeQuery(conn, sql);
+    while (rs.next()) {
+      String blockKey = rs.getString("blockKey");
+      String containerName = rs.getString("containerName");
+      assertTrue(blockContainerMap.containsKey(blockKey) &&
+          blockContainerMap.remove(blockKey).equals(containerName));
+    }
+    assertEquals(0, blockContainerMap.size());
+    Files.delete(Paths.get(dbOutPath));
+  }
+
   @Test
   public void testConvertContainerDB() throws Exception {
     String dbOutPath = cluster.getDataDirectory() + "/out_sql.db";
@@ -104,8 +175,8 @@ public class TestContainerSQLCli {
       //assertEquals(dnUUID, rs.getString("leaderUUID"));
     }
     assertTrue(containerNames.size() == 2 &&
-        containerNames.contains("container0") &&
-        containerNames.contains("container1"));
+        containerNames.contains(pipelineName1) &&
+        containerNames.contains(pipelineName2));
 
     sql = "SELECT * FROM containerMembers";
     rs = executeQuery(conn, sql);
@@ -115,8 +186,8 @@ public class TestContainerSQLCli {
       //assertEquals(dnUUID, rs.getString("datanodeUUID"));
     }
     assertTrue(containerNames.size() == 2 &&
-        containerNames.contains("container0") &&
-        containerNames.contains("container1"));
+        containerNames.contains(pipelineName1) &&
+        containerNames.contains(pipelineName2));
 
     sql = "SELECT * FROM datanodeInfo";
     rs = executeQuery(conn, sql);