
HDDS-2022. Add additional freon tests

Closes #1341
Márton Elek, 5 years ago
commit 15fded2788

+ 9 - 0
hadoop-ozone/tools/pom.xml

@@ -91,6 +91,15 @@ https://maven.apache.org/xsd/maven-4.0.0.xsd">
       <artifactId>jmh-generator-annprocess</artifactId>
       <version>1.19</version>
     </dependency>
+    <dependency>
+      <groupId>io.dropwizard.metrics</groupId>
+      <artifactId>metrics-core</artifactId>
+    </dependency>
+    <dependency>
+      <groupId>com.amazonaws</groupId>
+      <artifactId>aws-java-sdk-s3</artifactId>
+      <version>1.11.615</version>
+    </dependency>
     <dependency>
       <groupId>com.google.code.findbugs</groupId>
       <artifactId>findbugs</artifactId>

+ 333 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/BaseFreonGenerator.java

@@ -0,0 +1,333 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.net.InetSocketAddress;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.TimeUnit;
+import java.util.concurrent.atomic.AtomicLong;
+import java.util.regex.Matcher;
+import java.util.regex.Pattern;
+
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ipc.Client;
+import org.apache.hadoop.ipc.ProtobufRpcEngine;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+import org.apache.hadoop.ozone.client.OzoneVolume;
+import org.apache.hadoop.ozone.om.exceptions.OMException;
+import org.apache.hadoop.ozone.om.exceptions.OMException.ResultCodes;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolClientSideTranslatorPB;
+import org.apache.hadoop.ozone.om.protocolPB.OzoneManagerProtocolPB;
+import org.apache.hadoop.security.UserGroupInformation;
+
+import com.codahale.metrics.ConsoleReporter;
+import com.codahale.metrics.MetricRegistry;
+import org.apache.commons.codec.digest.DigestUtils;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.apache.ratis.protocol.ClientId;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Option;
+import picocli.CommandLine.ParentCommand;
+
+/**
+ * Base class for simplified performance tests.
+ */
+public class BaseFreonGenerator {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(BaseFreonGenerator.class);
+
+  private static final int CHECK_INTERVAL_MILLIS = 1000;
+
+  private static final String DIGEST_ALGORITHM = "MD5";
+
+  private static final Pattern ENV_VARIABLE_IN_PATTERN =
+      Pattern.compile("__(.+?)__");
+
+  @ParentCommand
+  private Freon freonCommand;
+
+  @Option(names = {"-n", "--number-of-tests"},
+      description = "Number of the generated objects.",
+      defaultValue = "1000")
+  private long testNo = 1000;
+
+  @Option(names = {"-t", "--threads", "--thread"},
+      description = "Number of threads used to execute",
+      defaultValue = "10")
+  private int threadNo;
+
+  @Option(names = {"-f", "--fail-at-end"},
+      description = "If turned on, all the tasks will be executed even if "
+          + "there are failures.")
+  private boolean failAtEnd;
+
+  @Option(names = {"-p", "--prefix"},
+      description = "Unique identifier of the test execution. Usually used as"
+          + " a prefix of the generated object names. If empty, a random name"
+          + " will be generated",
+      defaultValue = "")
+  private String prefix = "";
+
+  private MetricRegistry metrics = new MetricRegistry();
+
+  private ExecutorService executor;
+
+  private AtomicLong successCounter;
+
+  private AtomicLong failureCounter;
+
+  private long startTime;
+
+  private PathSchema pathSchema;
+
+  /**
+   * The main logic to execute a test generator.
+   *
+   * @param provider creates the new steps to execute.
+   */
+  public void runTests(TaskProvider provider) {
+
+    executor = Executors.newFixedThreadPool(threadNo);
+
+    ProgressBar progressBar =
+        new ProgressBar(System.out, testNo, successCounter::get);
+    progressBar.start();
+
+    startTime = System.currentTimeMillis();
+    //schedule the execution of all the tasks.
+
+    for (long i = 0; i < testNo; i++) {
+
+      final long counter = i;
+
+      executor.execute(() -> {
+        try {
+
+          //in case of another failed test, we shouldn't execute more tasks.
+          if (!failAtEnd && failureCounter.get() > 0) {
+            return;
+          }
+
+          provider.executeNextTask(counter);
+          successCounter.incrementAndGet();
+        } catch (Exception e) {
+          failureCounter.incrementAndGet();
+          LOG.error("Error on executing task", e);
+        }
+      });
+    }
+
+    // wait until all tasks are executed
+
+    while (successCounter.get() + failureCounter.get() < testNo && (
+        failureCounter.get() == 0 || failAtEnd)) {
+      try {
+        Thread.sleep(CHECK_INTERVAL_MILLIS);
+      } catch (InterruptedException e) {
+        throw new RuntimeException(e);
+      }
+    }
+
+    //shutdown everything
+    if (failureCounter.get() > 0 && !failAtEnd) {
+      progressBar.terminate();
+    } else {
+      progressBar.shutdown();
+    }
+    executor.shutdown();
+    try {
+      executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS);
+    } catch (Exception ex) {
+      ex.printStackTrace();
+    }
+
+    if (failureCounter.get() > 0) {
+      throw new RuntimeException("One ore more freon test is failed.");
+    }
+  }
+
+  /**
+   * Initialize internal counters and variables. Call it before runTests.
+   */
+  public void init() {
+
+    successCounter = new AtomicLong(0);
+    failureCounter = new AtomicLong(0);
+
+    if (prefix.length() == 0) {
+      prefix = RandomStringUtils.randomAlphanumeric(10);
+    } else {
+      //replace environment variables to support multi-node execution
+      prefix = resolvePrefix(prefix);
+    }
+    LOG.info("Executing test with prefix {}", prefix);
+
+    pathSchema = new PathSchema(prefix);
+
+    Runtime.getRuntime().addShutdownHook(
+        new Thread(this::printReport));
+  }
+
+  /**
+   * Resolve environment variables in the prefixes.
+   */
+  public String resolvePrefix(String inputPrefix) {
+    Matcher m = ENV_VARIABLE_IN_PATTERN.matcher(inputPrefix);
+    StringBuffer sb = new StringBuffer();
+    while (m.find()) {
+      String environment = System.getenv(m.group(1));
+      m.appendReplacement(sb, environment != null ? environment : "");
+    }
+    m.appendTail(sb);
+    return sb.toString();
+  }
+
+  /**
+   * Print out reports from the executed tests.
+   */
+  public void printReport() {
+    ConsoleReporter reporter = ConsoleReporter.forRegistry(metrics).build();
+    reporter.report();
+    System.out.println("Total execution time (sec): " + Math
+        .round((System.currentTimeMillis() - startTime) / 1000.0));
+    System.out.println("Failures: " + failureCounter.get());
+    System.out.println("Successful executions: " + successCounter.get());
+  }
+
+  /**
+   * Create the OM RPC client used for testing.
+   */
+  public OzoneManagerProtocolClientSideTranslatorPB createOmClient(
+      OzoneConfiguration conf) throws IOException {
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    long omVersion = RPC.getProtocolVersion(OzoneManagerProtocolPB.class);
+    InetSocketAddress omAddress = OmUtils.getOmAddressForClients(conf);
+    RPC.setProtocolEngine(conf, OzoneManagerProtocolPB.class,
+        ProtobufRpcEngine.class);
+    String clientId = ClientId.randomId().toString();
+    return new OzoneManagerProtocolClientSideTranslatorPB(
+        RPC.getProxy(OzoneManagerProtocolPB.class, omVersion, omAddress,
+            ugi, conf, NetUtils.getDefaultSocketFactory(conf),
+            Client.getRpcTimeout(conf)), clientId);
+  }
+
+  /**
+   * Generate a key/file name based on the prefix and counter.
+   */
+  public String generateObjectName(long counter) {
+    return pathSchema.getPath(counter);
+  }
+
+  /**
+   * Create missing target volume/bucket.
+   */
+  public void ensureVolumeAndBucketExist(OzoneConfiguration ozoneConfiguration,
+      String volumeName, String bucketName) throws IOException {
+
+    try (OzoneClient rpcClient = OzoneClientFactory
+        .getRpcClient(ozoneConfiguration)) {
+
+      OzoneVolume volume = null;
+      try {
+        volume = rpcClient.getObjectStore().getVolume(volumeName);
+      } catch (OMException ex) {
+        if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
+          rpcClient.getObjectStore().createVolume(volumeName);
+          volume = rpcClient.getObjectStore().getVolume(volumeName);
+        } else {
+          throw ex;
+        }
+      }
+
+      try {
+        volume.getBucket(bucketName);
+      } catch (OMException ex) {
+        if (ex.getResult() == ResultCodes.BUCKET_NOT_FOUND) {
+          volume.createBucket(bucketName);
+        } else {
+          throw ex;
+        }
+      }
+    }
+  }
+
+  /**
+   * Create missing target volume.
+   */
+  public void ensureVolumeExists(
+      OzoneConfiguration ozoneConfiguration,
+      String volumeName) throws IOException {
+    try (OzoneClient rpcClient = OzoneClientFactory
+        .getRpcClient(ozoneConfiguration)) {
+
+      try {
+        rpcClient.getObjectStore().getVolume(volumeName);
+      } catch (OMException ex) {
+        if (ex.getResult() == ResultCodes.VOLUME_NOT_FOUND) {
+          rpcClient.getObjectStore().createVolume(volumeName);
+        } else {
+          throw ex;
+        }
+      }
+
+    }
+  }
+
+  /**
+   * Calculate checksum of a byte array.
+   */
+  public byte[] getDigest(byte[] content) throws IOException {
+    DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM);
+    dig.getMessageDigest().reset();
+    return dig.digest(content);
+  }
+
+  /**
+   * Calculate checksum of an input stream.
+   */
+  public byte[] getDigest(InputStream stream) throws IOException {
+    DigestUtils dig = new DigestUtils(DIGEST_ALGORITHM);
+    dig.getMessageDigest().reset();
+    return dig.digest(stream);
+  }
+
+  public String getPrefix() {
+    return prefix;
+  }
+
+  public MetricRegistry getMetrics() {
+    return metrics;
+  }
+
+  public OzoneConfiguration createOzoneConfiguration() {
+    return freonCommand.createOzoneConfiguration();
+  }
+
+  /**
+   * Simple contract to execute a new step during a freon test.
+   */
+  @FunctionalInterface
+  public interface TaskProvider {
+    void executeNextTask(long step) throws Exception;
+  }
+
+}
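For reference, every subcommand added in this change follows the same pattern on top of BaseFreonGenerator: call init(), create a dropwizard Timer through getMetrics(), then hand a TaskProvider to runTests(). Below is a minimal sketch of that pattern, assuming only the API shown in the diff above; the class and command names here are hypothetical and not part of this commit.

package org.apache.hadoop.ozone.freon;

import java.util.concurrent.Callable;

import com.codahale.metrics.Timer;
import picocli.CommandLine.Command;

/**
 * Hypothetical example (not in this commit): a no-op generator showing how
 * the subcommands in this change use the BaseFreonGenerator API.
 */
@Command(name = "noopg",
    description = "Hypothetical no-op generator built on BaseFreonGenerator.")
public class NoopGenerator extends BaseFreonGenerator
    implements Callable<Void> {

  private Timer timer;

  @Override
  public Void call() throws Exception {
    init();                              // counters, prefix, shutdown-hook report
    timer = getMetrics().timer("noop");  // per-task latency, printed at the end
    runTests(this::runOneTask);          // TaskProvider executed on the thread pool
    return null;
  }

  private void runOneTask(long counter) throws Exception {
    timer.time(() -> {
      // a real generator would create the object named
      // generateObjectName(counter) here
      return null;
    });
  }
}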

+ 62 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/ContentGenerator.java

@@ -0,0 +1,62 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.IOException;
+import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
+
+import org.apache.commons.lang3.RandomStringUtils;
+
+/**
+ * Utility class to write random keys from a limited buffer.
+ */
+public class ContentGenerator {
+
+  /**
+   * Size of the destination object (key or file).
+   */
+  private int keySize;
+
+  /**
+   * Size of the pre-allocated content buffer (the buffer is reused until
+   * keySize bytes have been written).
+   */
+  private int bufferSize;
+
+  private final byte[] buffer;
+
+  ContentGenerator(int keySize, int bufferSize) {
+    this.keySize = keySize;
+    this.bufferSize = bufferSize;
+
+    buffer = RandomStringUtils.randomAscii(bufferSize)
+        .getBytes(StandardCharsets.UTF_8);
+
+  }
+
+  /**
+   * Write the required bytes to the output stream.
+   */
+  public void write(OutputStream outputStream) throws IOException {
+    for (long nrRemaining = keySize;
+         nrRemaining > 0; nrRemaining -= bufferSize) {
+      int curSize = (int) Math.min(bufferSize, nrRemaining);
+      outputStream.write(buffer, 0, curSize);
+    }
+  }
+}
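A short hypothetical usage sketch for ContentGenerator (not part of this commit; it sits in the same package because the constructor is package-private), illustrating how the bufferSize-byte random buffer is reused until keySize bytes have been written.

package org.apache.hadoop.ozone.freon;

import java.io.ByteArrayOutputStream;
import java.io.IOException;

/**
 * Hypothetical example (not in this commit) of ContentGenerator usage.
 */
public final class ContentGeneratorExample {

  public static void main(String[] args) throws IOException {
    // A 10240-byte key written from a 4096-byte pre-allocated random buffer:
    // the buffer is written twice in full and once truncated to 2048 bytes.
    ContentGenerator generator = new ContentGenerator(10240, 4096);

    ByteArrayOutputStream out = new ByteArrayOutputStream();
    generator.write(out);

    System.out.println(out.size()); // 10240
  }
}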

+ 10 - 1
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/Freon.java

@@ -34,7 +34,16 @@ import picocli.CommandLine.Option;
 @Command(
     name = "ozone freon",
     description = "Load generator and tester tool for ozone",
-    subcommands = RandomKeyGenerator.class,
+    subcommands = {
+        RandomKeyGenerator.class,
+        OzoneClientKeyGenerator.class,
+        OzoneClientKeyValidator.class,
+        OmKeyGenerator.class,
+        OmBucketGenerator.class,
+        HadoopFsGenerator.class,
+        HadoopFsValidator.class,
+        SameKeyReader.class,
+        S3KeyGenerator.class},
     versionProvider = HddsVersionProvider.class,
     mixinStandardHelpOptions = true)
 public class Freon extends GenericCli {

+ 99 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsGenerator.java

@@ -0,0 +1,99 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.net.URI;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import com.codahale.metrics.Timer;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data generator tool to test OM performance.
+ */
+@Command(name = "dfsg",
+    aliases = "dfs-file-generator",
+    description = "Create random files to the any dfs compatible file system.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class HadoopFsGenerator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HadoopFsGenerator.class);
+
+  @Option(names = {"--path"},
+      description = "Hadoop FS file system path",
+      defaultValue = "o3fs://bucket1.vol1")
+  private String rootPath;
+
+  @Option(names = {"-s", "--size"},
+      description = "Size of the generated files (in bytes)",
+      defaultValue = "10240")
+  private int fileSize;
+
+  @Option(names = {"--buffer"},
+      description = "Size of buffer used to generated the key content.",
+      defaultValue = "4096")
+  private int bufferSize;
+
+  private ContentGenerator contentGenerator;
+
+  private Timer timer;
+
+  private FileSystem fileSystem;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration configuration = createOzoneConfiguration();
+
+    fileSystem = FileSystem.get(URI.create(rootPath), configuration);
+
+    contentGenerator = new ContentGenerator(fileSize, bufferSize);
+
+    timer = getMetrics().timer("file-create");
+
+    runTests(this::createFile);
+
+    return null;
+  }
+
+  private void createFile(long counter) throws Exception {
+    Path file = new Path(rootPath + "/" + generateObjectName(counter));
+    fileSystem.mkdirs(file.getParent());
+
+    timer.time(() -> {
+      try (FSDataOutputStream output = fileSystem.create(file)) {
+        contentGenerator.write(output);
+      }
+      return null;
+    });
+  }
+}

+ 100 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/HadoopFsValidator.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.net.URI;
+import java.security.MessageDigest;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import com.codahale.metrics.Timer;
+import org.apache.commons.io.IOUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data validator tool to test OM performance.
+ */
+@Command(name = "dfsv",
+    aliases = "dfs-file-validator",
+    description = "Validate if the generated files have the same hash.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class HadoopFsValidator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(HadoopFsValidator.class);
+
+  @Option(names = {"--path"},
+      description = "Hadoop FS file system path",
+      defaultValue = "o3fs://bucket1.vol1")
+  private String rootPath;
+
+  private ContentGenerator contentGenerator;
+
+  private Timer timer;
+
+  private FileSystem fileSystem;
+
+  private byte[] referenceDigest;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration configuration = createOzoneConfiguration();
+
+    fileSystem = FileSystem.get(URI.create(rootPath), configuration);
+
+    Path file = new Path(rootPath + "/" + generateObjectName(0));
+    try (FSDataInputStream stream = fileSystem.open(file)) {
+      referenceDigest = getDigest(stream);
+    }
+
+    timer = getMetrics().timer("file-read");
+
+    runTests(this::validateFile);
+
+    return null;
+  }
+
+  private void validateFile(long counter) throws Exception {
+    Path file = new Path(rootPath + "/" + generateObjectName(counter));
+
+    byte[] content = timer.time(() -> {
+      try (FSDataInputStream input = fileSystem.open(file)) {
+        return IOUtils.toByteArray(input);
+      }
+    });
+
+    if (!MessageDigest.isEqual(referenceDigest, getDigest(content))) {
+      throw new IllegalStateException(
+          "Reference (=first) message digest doesn't match with digest of "
+              + file.toString());
+    }
+  }
+}

+ 85 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmBucketGenerator.java

@@ -0,0 +1,85 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.StorageType;
+import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data generator tool to test OM performance.
+ */
+@Command(name = "ombg",
+    aliases = "om-bucket-generator",
+    description = "Generate ozone buckets on OM side.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class OmBucketGenerator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  @Option(names = {"-v", "--volume"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "vol1")
+  private String volumeName;
+
+  private OzoneManagerProtocol ozoneManagerClient;
+
+  private Timer bucketCreationTimer;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
+
+    ozoneManagerClient = createOmClient(ozoneConfiguration);
+
+    ensureVolumeExists(ozoneConfiguration, volumeName);
+
+    bucketCreationTimer = getMetrics().timer("bucket-create");
+
+    runTests(this::createBucket);
+
+    return null;
+  }
+
+  private void createBucket(long index) throws Exception {
+
+    OmBucketInfo bucketInfo = new OmBucketInfo.Builder()
+        .setBucketName(getPrefix() + index)
+        .setVolumeName(volumeName)
+        .setStorageType(StorageType.DISK)
+        .build();
+
+    bucketCreationTimer.time(() -> {
+      ozoneManagerClient.createBucket(bucketInfo);
+      return null;
+    });
+  }
+
+}

+ 100 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OmKeyGenerator.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.util.ArrayList;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationFactor;
+import org.apache.hadoop.hdds.protocol.proto.HddsProtos.ReplicationType;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs.Builder;
+import org.apache.hadoop.ozone.om.helpers.OpenKeySession;
+import org.apache.hadoop.ozone.om.protocol.OzoneManagerProtocol;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data generator tool to test OM performance.
+ */
+@Command(name = "omkg",
+    aliases = "om-key-generator",
+    description = "Create keys to the om metadata table.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class OmKeyGenerator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  @Option(names = {"-v", "--volume"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "vol1")
+  private String volumeName;
+
+  @Option(names = {"-b", "--bucket"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "bucket1")
+  private String bucketName;
+
+  private OzoneManagerProtocol ozoneManagerClient;
+
+  private Timer timer;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
+
+    ensureVolumeAndBucketExist(ozoneConfiguration, volumeName, bucketName);
+
+    ozoneManagerClient = createOmClient(ozoneConfiguration);
+
+    timer = getMetrics().timer("key-create");
+
+    runTests(this::createKey);
+
+    return null;
+  }
+
+  private void createKey(long counter) throws Exception {
+
+    OmKeyArgs keyArgs = new Builder()
+        .setBucketName(bucketName)
+        .setVolumeName(volumeName)
+        .setType(ReplicationType.RATIS)
+        .setFactor(ReplicationFactor.THREE)
+        .setKeyName(generateObjectName(counter))
+        .setLocationInfoList(new ArrayList<>())
+        .build();
+
+    timer.time(() -> {
+      OpenKeySession openKeySession = ozoneManagerClient.openKey(keyArgs);
+
+      ozoneManagerClient.commitKey(keyArgs, openKeySession.getId());
+      return null;
+    });
+  }
+
+}

+ 114 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyGenerator.java

@@ -0,0 +1,114 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.OutputStream;
+import java.util.HashMap;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.client.ReplicationFactor;
+import org.apache.hadoop.hdds.client.ReplicationType;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+
+import com.codahale.metrics.Timer;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data generator tool to test OM performance.
+ */
+@Command(name = "ockg",
+    aliases = "ozone-client-key-generator",
+    description = "Generate keys with the help of the ozone clients.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class OzoneClientKeyGenerator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  @Option(names = {"-v", "--volume"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "vol1")
+  private String volumeName;
+
+  @Option(names = {"-b", "--bucket"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "bucket1")
+  private String bucketName;
+
+  @Option(names = {"-s", "--size"},
+      description = "Size of the generated key (in bytes)",
+      defaultValue = "10240")
+  private int keySize;
+
+  @Option(names = {"--buffer"},
+      description = "Size of buffer used to generated the key content.",
+      defaultValue = "4096")
+  private int bufferSize;
+
+  private Timer timer;
+
+  private OzoneBucket bucket;
+  private ContentGenerator contentGenerator;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
+
+    ensureVolumeAndBucketExist(ozoneConfiguration, volumeName, bucketName);
+
+    contentGenerator = new ContentGenerator(keySize, bufferSize);
+
+    try (OzoneClient rpcClient = OzoneClientFactory
+        .getRpcClient(ozoneConfiguration)) {
+
+      bucket =
+          rpcClient.getObjectStore().getVolume(volumeName)
+              .getBucket(bucketName);
+
+      timer = getMetrics().timer("key-create");
+
+      runTests(this::createKey);
+
+    }
+    return null;
+  }
+
+  private void createKey(long counter) throws Exception {
+
+    timer.time(() -> {
+      try (OutputStream stream = bucket
+          .createKey(generateObjectName(counter), keySize,
+              ReplicationType.RATIS,
+              ReplicationFactor.THREE,
+              new HashMap<>())) {
+        contentGenerator.write(stream);
+        stream.flush();
+      }
+      return null;
+    });
+  }
+}

+ 100 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/OzoneClientKeyValidator.java

@@ -0,0 +1,100 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.InputStream;
+import java.security.MessageDigest;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+
+import com.codahale.metrics.Timer;
+import org.apache.commons.io.IOUtils;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data validator tool to test OM performance.
+ */
+@Command(name = "ockv",
+    aliases = "ozone-client-key-validator",
+    description = "Validate keys with the help of the ozone clients.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class OzoneClientKeyValidator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  @Option(names = {"-v", "--volume"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "vol1")
+  private String volumeName;
+
+  @Option(names = {"-b", "--bucket"},
+      description = "Name of the bucket which contains the test data.",
+      defaultValue = "bucket1")
+  private String bucketName;
+
+  private Timer timer;
+
+  private byte[] referenceDigest;
+
+  private OzoneClient rpcClient;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
+
+    rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration);
+
+    try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
+        .getBucket(bucketName).readKey(generateObjectName(0))) {
+      referenceDigest = getDigest(stream);
+    }
+
+    timer = getMetrics().timer("key-validate");
+
+    runTests(this::validateKey);
+
+    return null;
+  }
+
+  private void validateKey(long counter) throws Exception {
+    String objectName = generateObjectName(counter);
+
+    byte[] content = timer.time(() -> {
+      try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
+          .getBucket(bucketName).readKey(objectName)) {
+        return IOUtils.toByteArray(stream);
+      }
+    });
+    if (!MessageDigest.isEqual(referenceDigest, getDigest(content))) {
+      throw new IllegalStateException(
+          "Reference (=first) message digest doesn't match with digest of "
+              + objectName);
+    }
+  }
+
+}

+ 38 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/PathSchema.java

@@ -0,0 +1,38 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+/**
+ * Class to generate the path based on a counter.
+ */
+public class PathSchema {
+
+  private String prefix;
+
+  public PathSchema(String prefix) {
+    this.prefix = prefix;
+  }
+
+  /**
+   * Return a relative path based on the current counter.
+   * <p>
+   * A more advanced implementation can generate a deep directory hierarchy.
+   */
+  public String getPath(long counter) {
+    return prefix + "/" + counter;
+  }
+}

+ 110 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/S3KeyGenerator.java

@@ -0,0 +1,110 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+
+import com.amazonaws.auth.EnvironmentVariableCredentialsProvider;
+import com.amazonaws.client.builder.AwsClientBuilder.EndpointConfiguration;
+import com.amazonaws.regions.Regions;
+import com.amazonaws.services.s3.AmazonS3;
+import com.amazonaws.services.s3.AmazonS3ClientBuilder;
+import com.codahale.metrics.Timer;
+import org.apache.commons.lang3.RandomStringUtils;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Generate random keys via the s3 interface.
+ */
+@Command(name = "s3kg",
+    aliases = "s3-key-generator",
+    description = "Create random keys via the s3 interface.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class S3KeyGenerator extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  private static final Logger LOG =
+      LoggerFactory.getLogger(S3KeyGenerator.class);
+
+  @Option(names = {"-b", "--bucket"},
+      description =
+          "Name of the (S3!) bucket which contains the test data.",
+      defaultValue = "bucket1")
+  private String bucketName;
+
+  @Option(names = {"-s", "--size"},
+      description = "Size of the generated key (in bytes)",
+      defaultValue = "10240")
+  private int fileSize;
+
+  @Option(names = {"-e", "--endpoint"},
+      description = "S3 HTTP endpoint",
+      defaultValue = "http://localhost:9878")
+  private String endpoint;
+
+  private Timer timer;
+
+  private String content;
+
+  private AmazonS3 s3;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    AmazonS3ClientBuilder amazonS3ClientBuilder =
+        AmazonS3ClientBuilder.standard()
+            .withCredentials(new EnvironmentVariableCredentialsProvider());
+
+    if (endpoint.length() > 0) {
+      amazonS3ClientBuilder
+          .withPathStyleAccessEnabled(true)
+          .withEndpointConfiguration(new EndpointConfiguration(endpoint, ""));
+
+    } else {
+      amazonS3ClientBuilder.withRegion(Regions.DEFAULT_REGION);
+    }
+
+    s3 = amazonS3ClientBuilder.build();
+
+    content = RandomStringUtils.randomAscii(fileSize);
+
+    timer = getMetrics().timer("key-create");
+
+    runTests(this::createKey);
+
+    return null;
+  }
+
+  private void createKey(long counter) throws Exception {
+    timer.time(() -> {
+
+      s3.putObject(bucketName, generateObjectName(counter),
+          content);
+      return null;
+    });
+  }
+}

+ 105 - 0
hadoop-ozone/tools/src/main/java/org/apache/hadoop/ozone/freon/SameKeyReader.java

@@ -0,0 +1,105 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with this
+ * work for additional information regarding copyright ownership.  The ASF
+ * licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ * <p>
+ * http://www.apache.org/licenses/LICENSE-2.0
+ * <p>
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations under
+ * the License.
+ */
+package org.apache.hadoop.ozone.freon;
+
+import java.io.InputStream;
+import java.security.MessageDigest;
+import java.util.concurrent.Callable;
+
+import org.apache.hadoop.hdds.cli.HddsVersionProvider;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.client.OzoneBucket;
+import org.apache.hadoop.ozone.client.OzoneClient;
+import org.apache.hadoop.ozone.client.OzoneClientFactory;
+
+import com.codahale.metrics.Timer;
+import org.apache.commons.io.IOUtils;
+import picocli.CommandLine.Command;
+import picocli.CommandLine.Option;
+
+/**
+ * Data reader tool to test OM performance.
+ */
+@Command(name = "ocokr",
+    aliases = "ozone-client-one-key-reader",
+    description = "Read the same key from multiple threads.",
+    versionProvider = HddsVersionProvider.class,
+    mixinStandardHelpOptions = true,
+    showDefaultValues = true)
+public class SameKeyReader extends BaseFreonGenerator
+    implements Callable<Void> {
+
+  @Option(names = {"-v", "--volume"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "vol1")
+  private String volumeName;
+
+  @Option(names = {"-b", "--bucket"},
+      description = "Name of the bucket which contains the test data. Will be"
+          + " created if missing.",
+      defaultValue = "bucket1")
+  private String bucketName;
+
+  @Option(names = {"-k", "--key"},
+      required = true,
+      description = "Name of the key read from multiple threads")
+  private String keyName;
+
+  private Timer timer;
+
+  private byte[] referenceDigest;
+
+  private OzoneClient rpcClient;
+
+  @Override
+  public Void call() throws Exception {
+
+    init();
+
+    OzoneConfiguration ozoneConfiguration = createOzoneConfiguration();
+
+    rpcClient = OzoneClientFactory.getRpcClient(ozoneConfiguration);
+
+    try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
+        .getBucket(bucketName).readKey(keyName)) {
+      referenceDigest = getDigest(stream);
+    }
+
+    timer = getMetrics().timer("key-create");
+
+    runTests(this::validateKey);
+
+    return null;
+  }
+
+  private void validateKey(long counter) throws Exception {
+
+    byte[] content = timer.time(() -> {
+      try (InputStream stream = rpcClient.getObjectStore().getVolume(volumeName)
+          .getBucket(bucketName).readKey(keyName)) {
+        return IOUtils.toByteArray(stream);
+      }
+    });
+    if (!MessageDigest.isEqual(referenceDigest, getDigest(content))) {
+      throw new IllegalStateException(
+          "Reference message digest doesn't match with the digest of the same"
+              + " key." + counter);
+    }
+  }
+
+}