
HDDS-816. Create OM metrics for bucket, volume, keys. Contributed by Bharat Viswanadham.

Bharat Viswanadham 6 years ago
parent
commit
c4d0ef6946

+ 3 - 0
hadoop-hdds/common/src/main/java/org/apache/hadoop/ozone/OzoneConsts.java

@@ -226,5 +226,8 @@ public final class OzoneConsts {
   public static final String KEY_LOCATION_INFO = "keyLocationInfo";
 
 
+  // For saving OM metrics to a file
+  public static final String OM_METRICS_FILE = "omMetrics";
+  public static final String OM_METRICS_TEMP_FILE = OM_METRICS_FILE + ".tmp";
 
 }
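
The two constants above back a write-to-temp-then-atomic-rename pattern used later in this change (see saveOmMetrics in OzoneManager). A minimal, self-contained sketch of that pattern, with a hypothetical directory path:

import java.io.File;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.StandardCopyOption;

public class AtomicMetricsFileSketch {
  public static void main(String[] args) throws IOException {
    File dir = new File("/tmp/om-meta");        // hypothetical OM metadata dir
    dir.mkdirs();
    File tmp = new File(dir, "omMetrics.tmp");  // OM_METRICS_TEMP_FILE
    File dst = new File(dir, "omMetrics");      // OM_METRICS_FILE
    // Write the snapshot to the temp file first...
    Files.write(tmp.toPath(),
        "{ \"numKeys\" : 0 }".getBytes(StandardCharsets.UTF_8));
    // ...then atomically move it into place so readers never see a partial file.
    Files.move(tmp.toPath(), dst.toPath(),
        StandardCopyOption.ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
  }
}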

+ 10 - 0
hadoop-hdds/common/src/main/resources/ozone-default.xml

@@ -1351,5 +1351,15 @@
       service principal. </description>
   </property>
 
+  <property>
+    <name>ozone.om.save.metrics.interval</name>
+    <value>5m</value>
+    <tag>OZONE, OM</tag>
+    <description>Time interval used to store the OM metrics into a
+      file. A background thread periodically stores the OM metrics into a
+      file. The unit can be specified with a postfix (ns, ms, s, m, h, d).
+    </description>
+  </property>
+
 
 </configuration>
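
A minimal sketch (not the committed code) of how a duration property like the one above is typically read and used to drive a periodic save task; the literal key string and the empty task body are illustrative assumptions:

import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.hdds.conf.OzoneConfiguration;

public class MetricsSaveIntervalSketch {
  public static void main(String[] args) {
    OzoneConfiguration conf = new OzoneConfiguration();
    // getTimeDuration understands the (ns, ms, s, m, h, d) postfixes described above.
    long periodMs = conf.getTimeDuration(
        "ozone.om.save.metrics.interval", "5m", TimeUnit.MILLISECONDS);
    Timer timer = new Timer();
    timer.schedule(new TimerTask() {
      @Override
      public void run() {
        // a real OM would write its metrics snapshot to a file here
      }
    }, 0, periodMs);
  }
}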

+ 5 - 0
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMConfigKeys.java

@@ -18,6 +18,7 @@
 package org.apache.hadoop.ozone.om;
 
 import org.apache.hadoop.ozone.OzoneAcl;
+
 /**
  * Ozone Manager Constants.
  */
@@ -81,4 +82,8 @@ public final class OMConfigKeys {
   public static final String OZONE_KEY_DELETING_LIMIT_PER_TASK =
       "ozone.key.deleting.limit.per.task";
   public static final int OZONE_KEY_DELETING_LIMIT_PER_TASK_DEFAULT = 1000;
+
+  public static final String OZONE_OM_METRICS_SAVE_INTERVAL =
+      "ozone.om.save.metrics.interval";
+  public static final String OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT = "5m";
 }

+ 14 - 1
hadoop-ozone/common/src/main/java/org/apache/hadoop/ozone/om/OMMetadataManager.java

@@ -17,6 +17,7 @@
 package org.apache.hadoop.ozone.om;
 
 import com.google.common.annotations.VisibleForTesting;
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.helpers.OmBucketInfo;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -33,8 +34,11 @@ import java.util.List;
 public interface OMMetadataManager {
   /**
    * Start metadata manager.
+   *
+   * @param configuration
+   * @throws IOException
    */
-  void start();
+  void start(OzoneConfiguration configuration) throws IOException;
 
   /**
    * Stop metadata manager.
@@ -242,4 +246,13 @@ public interface OMMetadataManager {
    * @return Table.
    */
   Table getS3Table();
+
+  /**
+   * Returns the number of rows in a table. This should not be used for very
+   * large tables.
+   * @param table
+   * @return long
+   * @throws IOException
+   */
+  long countRowsInTable(Table table) throws IOException;
 }
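
A hedged usage sketch of the new interface method, mirroring how this change later seeds the object-count gauges at startup; the helper class and method name are illustrative only:

import java.io.IOException;

/** Illustrative helper: seed the volume/bucket gauges from the DB at startup. */
public final class ObjectCountSeeder {
  private ObjectCountSeeder() {
  }

  public static void seedObjectCounts(OMMetadataManager metadataManager,
      OMMetrics metrics) throws IOException {
    metrics.setNumVolumes(
        metadataManager.countRowsInTable(metadataManager.getVolumeTable()));
    metrics.setNumBuckets(
        metadataManager.countRowsInTable(metadataManager.getBucketTable()));
  }
}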

+ 128 - 12
hadoop-ozone/integration-test/src/test/java/org/apache/hadoop/ozone/om/TestOmMetrics.java

@@ -18,13 +18,23 @@ package org.apache.hadoop.ozone.om;
 
 import static org.apache.hadoop.test.MetricsAsserts.assertCounter;
 import static org.apache.hadoop.test.MetricsAsserts.getMetrics;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyLong;
 
 import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.concurrent.TimeUnit;
 
+import org.apache.hadoop.hdds.client.BlockID;
+import org.apache.hadoop.hdds.client.ContainerBlockID;
 import org.apache.hadoop.hdds.scm.HddsWhiteboxTestUtils;
 import org.apache.hadoop.metrics2.MetricsRecordBuilder;
 import org.apache.hadoop.ozone.MiniOzoneCluster;
 import org.apache.hadoop.hdds.conf.OzoneConfiguration;
+import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
+import org.apache.hadoop.ozone.om.helpers.OmKeyLocationInfo;
+import org.apache.hadoop.test.MetricsAsserts;
 import org.junit.After;
 import org.junit.Before;
 import org.junit.Test;
@@ -51,6 +61,8 @@ public class TestOmMetrics {
   @Before
   public void setup() throws Exception {
     OzoneConfiguration conf = new OzoneConfiguration();
+    conf.setTimeDuration(OMConfigKeys.OZONE_OM_METRICS_SAVE_INTERVAL,
+        1000, TimeUnit.MILLISECONDS);
     cluster = MiniOzoneCluster.newBuilder(conf).build();
     cluster.waitForClusterToBeReady();
     ozoneManager = cluster.getOzoneManager();
@@ -66,6 +78,8 @@ public class TestOmMetrics {
     }
   }
 
+
+
   @Test
   public void testVolumeOps() throws IOException {
     VolumeManager volumeManager =
@@ -92,6 +106,16 @@ public class TestOmMetrics {
     assertCounter("NumVolumeCheckAccesses", 1L, omMetrics);
     assertCounter("NumVolumeDeletes", 1L, omMetrics);
     assertCounter("NumVolumeLists", 1L, omMetrics);
+    assertCounter("NumVolumes", 0L, omMetrics);
+
+    ozoneManager.createVolume(null);
+    ozoneManager.createVolume(null);
+    ozoneManager.createVolume(null);
+    ozoneManager.deleteVolume(null);
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumVolumes", 2L, omMetrics);
+
 
     // inject exception to test for Failure Metrics
     Mockito.doThrow(exception).when(mockVm).createVolume(null);
@@ -106,12 +130,12 @@ public class TestOmMetrics {
     doVolumeOps();
 
     omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumVolumeOps", 12L, omMetrics);
-    assertCounter("NumVolumeCreates", 2L, omMetrics);
+    assertCounter("NumVolumeOps", 16L, omMetrics);
+    assertCounter("NumVolumeCreates", 5L, omMetrics);
     assertCounter("NumVolumeUpdates", 2L, omMetrics);
     assertCounter("NumVolumeInfos", 2L, omMetrics);
     assertCounter("NumVolumeCheckAccesses", 2L, omMetrics);
-    assertCounter("NumVolumeDeletes", 2L, omMetrics);
+    assertCounter("NumVolumeDeletes", 3L, omMetrics);
     assertCounter("NumVolumeLists", 2L, omMetrics);
 
     assertCounter("NumVolumeCreateFails", 1L, omMetrics);
@@ -120,6 +144,15 @@ public class TestOmMetrics {
     assertCounter("NumVolumeCheckAccessFails", 1L, omMetrics);
     assertCounter("NumVolumeDeleteFails", 1L, omMetrics);
     assertCounter("NumVolumeListFails", 1L, omMetrics);
+
+    // The last round of volume ops failed, so numVolumes is unchanged.
+    assertCounter("NumVolumes", 2L, omMetrics);
+
+    cluster.restartOzoneManager();
+    assertCounter("NumVolumes", 2L, omMetrics);
+
+
   }
 
   @Test
@@ -129,6 +162,16 @@ public class TestOmMetrics {
             ozoneManager, "bucketManager");
     BucketManager mockBm = Mockito.spy(bucketManager);
 
+    S3BucketManager s3BucketManager =
+        (S3BucketManager) HddsWhiteboxTestUtils.getInternalState(
+            ozoneManager, "s3BucketManager");
+    S3BucketManager mockS3Bm = Mockito.spy(s3BucketManager);
+
+    Mockito.doNothing().when(mockS3Bm).createS3Bucket("random", "random");
+    Mockito.doNothing().when(mockS3Bm).deleteS3Bucket("random");
+    Mockito.doReturn(true).when(mockS3Bm).createOzoneVolumeIfNeeded(null);
+
+    Mockito.doNothing().when(mockBm).createBucket(null);
     Mockito.doNothing().when(mockBm).createBucket(null);
     Mockito.doNothing().when(mockBm).deleteBucket(null, null);
     Mockito.doReturn(null).when(mockBm).getBucketInfo(null, null);
@@ -146,6 +189,35 @@ public class TestOmMetrics {
     assertCounter("NumBucketInfos", 1L, omMetrics);
     assertCounter("NumBucketDeletes", 1L, omMetrics);
     assertCounter("NumBucketLists", 1L, omMetrics);
+    assertCounter("NumBuckets", 0L, omMetrics);
+
+    ozoneManager.createBucket(null);
+    ozoneManager.createBucket(null);
+    ozoneManager.createBucket(null);
+    ozoneManager.deleteBucket(null, null);
+
+    // Capture the current values, since the same metrics instance is shared
+    // across all the test cases.
+    long numVolumesOps = MetricsAsserts.getLongCounter("NumVolumeOps",
+        omMetrics);
+    long numVolumes = MetricsAsserts.getLongCounter("NumVolumes",
+        omMetrics);
+    long numVolumeCreates = MetricsAsserts.getLongCounter("NumVolumeCreates",
+        omMetrics);
+
+    ozoneManager.createS3Bucket("random", "random");
+    ozoneManager.createS3Bucket("random1", "random1");
+    ozoneManager.createS3Bucket("random2", "random2");
+    ozoneManager.deleteS3Bucket("random");
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumBuckets", 4L, omMetrics);
+
+    assertCounter("NumVolumeOps", numVolumesOps + 3, omMetrics);
+    assertCounter("NumVolumeCreates", numVolumeCreates + 3, omMetrics);
+    assertCounter("NumVolumes", numVolumes + 3, omMetrics);
+
+
 
     // inject exception to test for Failure Metrics
     Mockito.doThrow(exception).when(mockBm).createBucket(null);
@@ -159,11 +231,11 @@ public class TestOmMetrics {
     doBucketOps();
 
     omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumBucketOps", 10L, omMetrics);
-    assertCounter("NumBucketCreates", 2L, omMetrics);
+    assertCounter("NumBucketOps", 18L, omMetrics);
+    assertCounter("NumBucketCreates", 8L, omMetrics);
     assertCounter("NumBucketUpdates", 2L, omMetrics);
     assertCounter("NumBucketInfos", 2L, omMetrics);
-    assertCounter("NumBucketDeletes", 2L, omMetrics);
+    assertCounter("NumBucketDeletes", 4L, omMetrics);
     assertCounter("NumBucketLists", 2L, omMetrics);
 
     assertCounter("NumBucketCreateFails", 1L, omMetrics);
@@ -171,29 +243,49 @@ public class TestOmMetrics {
     assertCounter("NumBucketInfoFails", 1L, omMetrics);
     assertCounter("NumBucketDeleteFails", 1L, omMetrics);
     assertCounter("NumBucketListFails", 1L, omMetrics);
+
+    assertCounter("NumBuckets", 4L, omMetrics);
+
+    cluster.restartOzoneManager();
+    assertCounter("NumBuckets", 4L, omMetrics);
   }
 
   @Test
   public void testKeyOps() throws IOException {
-    KeyManager bucketManager = (KeyManager) HddsWhiteboxTestUtils
+    KeyManager keyManager = (KeyManager) HddsWhiteboxTestUtils
         .getInternalState(ozoneManager, "keyManager");
-    KeyManager mockKm = Mockito.spy(bucketManager);
+    KeyManager mockKm = Mockito.spy(keyManager);
 
     Mockito.doReturn(null).when(mockKm).openKey(null);
     Mockito.doNothing().when(mockKm).deleteKey(null);
     Mockito.doReturn(null).when(mockKm).lookupKey(null);
     Mockito.doReturn(null).when(mockKm).listKeys(null, null, null, null, 0);
+    Mockito.doNothing().when(mockKm).commitKey(any(OmKeyArgs.class), anyLong());
 
     HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "keyManager", mockKm);
     doKeyOps();
 
     MetricsRecordBuilder omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumKeyOps", 4L, omMetrics);
+    assertCounter("NumKeyOps", 5L, omMetrics);
     assertCounter("NumKeyAllocate", 1L, omMetrics);
     assertCounter("NumKeyLookup", 1L, omMetrics);
     assertCounter("NumKeyDeletes", 1L, omMetrics);
     assertCounter("NumKeyLists", 1L, omMetrics);
+    assertCounter("NumKeys", 0L, omMetrics);
+
+
+    ozoneManager.openKey(null);
+    ozoneManager.commitKey(createKeyArgs(), 0);
+    ozoneManager.openKey(null);
+    ozoneManager.commitKey(createKeyArgs(), 0);
+    ozoneManager.openKey(null);
+    ozoneManager.commitKey(createKeyArgs(), 0);
+    ozoneManager.deleteKey(null);
+
+
+    omMetrics = getMetrics("OMMetrics");
+    assertCounter("NumKeys", 2L, omMetrics);
 
     // inject exception to test for Failure Metrics
     Mockito.doThrow(exception).when(mockKm).openKey(null);
@@ -201,22 +293,30 @@ public class TestOmMetrics {
     Mockito.doThrow(exception).when(mockKm).lookupKey(null);
     Mockito.doThrow(exception).when(mockKm).listKeys(
         null, null, null, null, 0);
+    Mockito.doThrow(exception).when(mockKm).commitKey(any(OmKeyArgs.class),
+        anyLong());
 
     HddsWhiteboxTestUtils.setInternalState(
         ozoneManager, "keyManager", mockKm);
     doKeyOps();
 
     omMetrics = getMetrics("OMMetrics");
-    assertCounter("NumKeyOps", 8L, omMetrics);
-    assertCounter("NumKeyAllocate", 2L, omMetrics);
+    assertCounter("NumKeyOps", 17L, omMetrics);
+    assertCounter("NumKeyAllocate", 5L, omMetrics);
     assertCounter("NumKeyLookup", 2L, omMetrics);
-    assertCounter("NumKeyDeletes", 2L, omMetrics);
+    assertCounter("NumKeyDeletes", 3L, omMetrics);
     assertCounter("NumKeyLists", 2L, omMetrics);
 
     assertCounter("NumKeyAllocateFails", 1L, omMetrics);
     assertCounter("NumKeyLookupFails", 1L, omMetrics);
     assertCounter("NumKeyDeleteFails", 1L, omMetrics);
     assertCounter("NumKeyListFails", 1L, omMetrics);
+
+    assertCounter("NumKeys", 2L, omMetrics);
+
+    cluster.restartOzoneManager();
+    assertCounter("NumKeys", 2L, omMetrics);
+
   }
 
   /**
@@ -307,5 +407,21 @@ public class TestOmMetrics {
       ozoneManager.listKeys(null, null, null, null, 0);
     } catch (IOException ignored) {
     }
+
+    try {
+      ozoneManager.commitKey(createKeyArgs(), 0);
+    } catch (IOException ignored) {
+    }
+  }
+
+  private OmKeyArgs createKeyArgs() {
+    OmKeyLocationInfo keyLocationInfo = new OmKeyLocationInfo.Builder()
+        .setBlockID(new BlockID(new ContainerBlockID(1, 1))).build();
+    keyLocationInfo.setCreateVersion(0);
+    List<OmKeyLocationInfo> omKeyLocationInfoList = new ArrayList<>();
+    omKeyLocationInfoList.add(keyLocationInfo);
+    OmKeyArgs keyArgs = new OmKeyArgs.Builder().setLocationInfoList(
+        omKeyLocationInfoList).build();
+    return keyArgs;
   }
 }

+ 5 - 1
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManager.java

@@ -16,6 +16,7 @@
  */
 package org.apache.hadoop.ozone.om;
 
+import org.apache.hadoop.hdds.conf.OzoneConfiguration;
 import org.apache.hadoop.ozone.common.BlockGroup;
 import org.apache.hadoop.ozone.om.helpers.OmKeyArgs;
 import org.apache.hadoop.ozone.om.helpers.OmKeyInfo;
@@ -33,8 +34,11 @@ public interface KeyManager {
 
   /**
    * Start key manager.
+   *
+   * @param configuration
+   * @throws IOException
    */
-  void start();
+  void start(OzoneConfiguration configuration);
 
   /**
    * Stop key manager.

+ 20 - 15
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/KeyManagerImpl.java

@@ -79,7 +79,7 @@ public class KeyManagerImpl implements KeyManager {
   private final long preallocateMax;
   private final String omId;
 
-  private final BackgroundService keyDeletingService;
+  private BackgroundService keyDeletingService;
 
   public KeyManagerImpl(ScmBlockLocationProtocol scmBlockClient,
       OMMetadataManager metadataManager,
@@ -95,28 +95,33 @@ public class KeyManagerImpl implements KeyManager {
     this.preallocateMax = conf.getLong(
         OZONE_KEY_PREALLOCATION_MAXSIZE,
         OZONE_KEY_PREALLOCATION_MAXSIZE_DEFAULT);
-    long blockDeleteInterval = conf.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
-        OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    long serviceTimeout = conf.getTimeDuration(
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
-        OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
-        TimeUnit.MILLISECONDS);
-    keyDeletingService = new KeyDeletingService(
-        scmBlockClient, this, blockDeleteInterval, serviceTimeout, conf);
-
     this.omId = omId;
+    start(conf);
   }
 
   @Override
-  public void start() {
-    keyDeletingService.start();
+  public void start(OzoneConfiguration configuration) {
+    if (keyDeletingService == null) {
+      long blockDeleteInterval = configuration.getTimeDuration(
+          OZONE_BLOCK_DELETING_SERVICE_INTERVAL,
+          OZONE_BLOCK_DELETING_SERVICE_INTERVAL_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      long serviceTimeout = configuration.getTimeDuration(
+          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT,
+          OZONE_BLOCK_DELETING_SERVICE_TIMEOUT_DEFAULT,
+          TimeUnit.MILLISECONDS);
+      keyDeletingService = new KeyDeletingService(scmBlockClient, this,
+          blockDeleteInterval, serviceTimeout, configuration);
+      keyDeletingService.start();
+    }
   }
 
   @Override
   public void stop() throws IOException {
-    keyDeletingService.shutdown();
+    if (keyDeletingService != null) {
+      keyDeletingService.shutdown();
+      keyDeletingService = null;
+    }
   }
 
   private void validateBucket(String volumeName, String bucketName)

+ 60 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OMMetrics.java

@@ -85,6 +85,17 @@ public class OMMetrics {
   private @Metric MutableCounterLong numGetServiceListFails;
   private @Metric MutableCounterLong numListS3BucketsFails;
 
+  // Metrics for total number of volumes, buckets and keys
+
+  private @Metric MutableCounterLong numVolumes;
+  private @Metric MutableCounterLong numBuckets;
+
+  //TODO: This metric is an estimate and it may be inaccurate on restart if the
+  // OM process was not shut down cleanly. Key creations/deletions in the last
+  // few minutes before the restart may not be included in this count.
+  private @Metric MutableCounterLong numKeys;
+
+
   public OMMetrics() {
   }
 
@@ -95,6 +106,55 @@ public class OMMetrics {
         new OMMetrics());
   }
 
+  public void incNumVolumes() {
+    numVolumes.incr();
+  }
+
+  public void decNumVolumes() {
+    numVolumes.incr(-1);
+  }
+
+  public void incNumBuckets() {
+    numBuckets.incr();
+  }
+
+  public void decNumBuckets() {
+    numBuckets.incr(-1);
+  }
+
+  public void incNumKeys() {
+    numKeys.incr();
+  }
+
+  public void decNumKeys() {
+    numKeys.incr(-1);
+  }
+
+  public void setNumVolumes(long val) {
+    this.numVolumes.incr(val);
+  }
+
+  public void setNumBuckets(long val) {
+    this.numBuckets.incr(val);
+  }
+
+  public void setNumKeys(long val) {
+    this.numKeys.incr(val);
+  }
+
+  public long getNumVolumes() {
+    return numVolumes.value();
+  }
+
+  public long getNumBuckets() {
+    return numBuckets.value();
+  }
+
+  public long getNumKeys() {
+    return numKeys.value();
+  }
+
+
   public void incNumVolumeCreates() {
     numVolumeOps.incr();
     numVolumeCreates.incr();

+ 62 - 44
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetadataManagerImpl.java

@@ -100,59 +100,25 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   private static final String OPEN_KEY_TABLE = "openKeyTable";
   private static final String S3_TABLE = "s3Table";
 
-  private final DBStore store;
+  private DBStore store;
 
   private final OzoneManagerLock lock;
   private final long openKeyExpireThresholdMS;
 
-  private final Table userTable;
-  private final Table volumeTable;
-  private final Table bucketTable;
-  private final Table keyTable;
-  private final Table deletedTable;
-  private final Table openKeyTable;
-  private final Table s3Table;
+  private Table userTable;
+  private Table volumeTable;
+  private Table bucketTable;
+  private Table keyTable;
+  private Table deletedTable;
+  private Table openKeyTable;
+  private Table s3Table;
 
   public OmMetadataManagerImpl(OzoneConfiguration conf) throws IOException {
-    File metaDir = OmUtils.getOmDbDir(conf);
     this.lock = new OzoneManagerLock(conf);
     this.openKeyExpireThresholdMS = 1000L * conf.getInt(
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS,
         OZONE_OPEN_KEY_EXPIRE_THRESHOLD_SECONDS_DEFAULT);
-
-    this.store = DBStoreBuilder.newBuilder(conf)
-        .setName(OM_DB_NAME)
-        .setPath(Paths.get(metaDir.getPath()))
-        .addTable(USER_TABLE)
-        .addTable(VOLUME_TABLE)
-        .addTable(BUCKET_TABLE)
-        .addTable(KEY_TABLE)
-        .addTable(DELETED_TABLE)
-        .addTable(OPEN_KEY_TABLE)
-        .addTable(S3_TABLE)
-        .build();
-
-    userTable = this.store.getTable(USER_TABLE);
-    checkTableStatus(userTable, USER_TABLE);
-
-    volumeTable = this.store.getTable(VOLUME_TABLE);
-    checkTableStatus(volumeTable, VOLUME_TABLE);
-
-    bucketTable = this.store.getTable(BUCKET_TABLE);
-    checkTableStatus(bucketTable, BUCKET_TABLE);
-
-    keyTable = this.store.getTable(KEY_TABLE);
-    checkTableStatus(keyTable, KEY_TABLE);
-
-    deletedTable = this.store.getTable(DELETED_TABLE);
-    checkTableStatus(deletedTable, DELETED_TABLE);
-
-    openKeyTable = this.store.getTable(OPEN_KEY_TABLE);
-    checkTableStatus(openKeyTable, OPEN_KEY_TABLE);
-
-    s3Table = this.store.getTable(S3_TABLE);
-    checkTableStatus(s3Table, S3_TABLE);
-
+    start(conf);
   }
 
   @Override
@@ -206,8 +172,44 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
    * Start metadata manager.
    */
   @Override
-  public void start() {
+  public void start(OzoneConfiguration configuration) throws IOException {
+    // We need to create the DB here because stop() closes it during a
+    // restart, so the store object must be recreated and the DB re-initialized.
+    if (store == null) {
+      File metaDir = OmUtils.getOmDbDir(configuration);
+      this.store = DBStoreBuilder.newBuilder(configuration)
+          .setName(OM_DB_NAME)
+          .setPath(Paths.get(metaDir.getPath()))
+          .addTable(USER_TABLE)
+          .addTable(VOLUME_TABLE)
+          .addTable(BUCKET_TABLE)
+          .addTable(KEY_TABLE)
+          .addTable(DELETED_TABLE)
+          .addTable(OPEN_KEY_TABLE)
+          .addTable(S3_TABLE)
+          .build();
+
+      userTable = this.store.getTable(USER_TABLE);
+      checkTableStatus(userTable, USER_TABLE);
+
+      volumeTable = this.store.getTable(VOLUME_TABLE);
+      checkTableStatus(volumeTable, VOLUME_TABLE);
 
+      bucketTable = this.store.getTable(BUCKET_TABLE);
+      checkTableStatus(bucketTable, BUCKET_TABLE);
+
+      keyTable = this.store.getTable(KEY_TABLE);
+      checkTableStatus(keyTable, KEY_TABLE);
+
+      deletedTable = this.store.getTable(DELETED_TABLE);
+      checkTableStatus(deletedTable, DELETED_TABLE);
+
+      openKeyTable = this.store.getTable(OPEN_KEY_TABLE);
+      checkTableStatus(openKeyTable, OPEN_KEY_TABLE);
+
+      s3Table = this.store.getTable(S3_TABLE);
+      checkTableStatus(s3Table, S3_TABLE);
+    }
   }
 
   /**
@@ -217,6 +219,7 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
   public void stop() throws Exception {
     if (store != null) {
       store.close();
+      store = null;
     }
   }
 
@@ -627,4 +630,19 @@ public class OmMetadataManagerImpl implements OMMetadataManager {
     }
     return keyBlocksList;
   }
+
+  @Override
+  public long countRowsInTable(Table table) throws IOException {
+    long count = 0;
+    if (table != null) {
+      try (TableIterator<Table.KeyValue> keyValueTableIterator =
+               table.iterator()) {
+        while (keyValueTableIterator.hasNext()) {
+          keyValueTableIterator.next();
+          count++;
+        }
+      }
+    }
+    return count;
+  }
 }

+ 43 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OmMetricsInfo.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ozone.om;
+
+import com.fasterxml.jackson.annotation.JsonProperty;
+
+/**
+ * OmMetricsInfo is persisted to a file and used during OM restart to
+ * initialize the metrics. Currently it stores only numKeys.
+ */
+public class OmMetricsInfo {
+
+  @JsonProperty
+  private long numKeys;
+
+  OmMetricsInfo() {
+    this.numKeys = 0;
+  }
+
+  public long getNumKeys() {
+    return numKeys;
+  }
+
+  public void setNumKeys(long numKeys) {
+    this.numKeys = numKeys;
+  }
+}
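
A small sketch of how this class round-trips through JSON with Jackson, matching the ObjectWriter/ObjectReader usage added to OzoneManager below. The file path is hypothetical, and the sketch assumes it lives in the same package so the package-private constructor is visible:

package org.apache.hadoop.ozone.om;

import java.io.File;
import java.io.IOException;
import com.fasterxml.jackson.databind.ObjectMapper;

public class OmMetricsInfoRoundTrip {
  public static void main(String[] args) throws IOException {
    ObjectMapper mapper = new ObjectMapper();
    OmMetricsInfo info = new OmMetricsInfo();
    info.setNumKeys(42);

    File file = new File("/tmp/omMetrics");   // hypothetical location
    mapper.writerWithDefaultPrettyPrinter().writeValue(file, info);

    // On restart the OM reads the file back and seeds the NumKeys counter.
    OmMetricsInfo restored = mapper.readerFor(OmMetricsInfo.class).readValue(file);
    System.out.println(restored.getNumKeys());   // prints 42
  }
}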

+ 187 - 24
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/OzoneManager.java

@@ -17,6 +17,9 @@
 
 package org.apache.hadoop.ozone.om;
 
+import com.fasterxml.jackson.databind.ObjectMapper;
+import com.fasterxml.jackson.databind.ObjectReader;
+import com.fasterxml.jackson.databind.ObjectWriter;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
 import com.google.protobuf.BlockingService;
@@ -41,6 +44,7 @@ import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.ozone.OmUtils;
 import org.apache.hadoop.ozone.OzoneConsts;
 import org.apache.hadoop.ozone.audit.AuditAction;
 import org.apache.hadoop.ozone.audit.AuditEventStatus;
@@ -67,19 +71,29 @@ import org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.Service
 import org.apache.hadoop.ozone.protocolPB.OzoneManagerProtocolServerSideTranslatorPB;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.GenericOptionsParser;
+import org.apache.hadoop.util.ShutdownHookManager;
 import org.apache.hadoop.util.StringUtils;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import javax.management.ObjectName;
+import java.io.BufferedWriter;
+import java.io.File;
+import java.io.FileOutputStream;
 import java.io.IOException;
+import java.io.OutputStreamWriter;
 import java.io.PrintStream;
 import java.net.InetSocketAddress;
+import java.nio.file.Files;
+import java.nio.file.StandardCopyOption;
 import java.util.ArrayList;
 import java.util.HashMap;
 import java.util.LinkedHashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Timer;
+import java.util.TimerTask;
+import java.util.concurrent.TimeUnit;
 
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForBlockClients;
 import static org.apache.hadoop.hdds.HddsUtils.getScmAddressForClients;
@@ -87,10 +101,19 @@ import static org.apache.hadoop.hdds.HddsUtils.isHddsEnabled;
 import static org.apache.hadoop.hdds.protocol.proto.HddsProtos.NodeState.HEALTHY;
 import static org.apache.hadoop.hdds.server.ServerUtils.updateRPCListenAddress;
 import static org.apache.hadoop.ozone.OmUtils.getOmAddress;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_FILE;
+import static org.apache.hadoop.ozone.OzoneConsts.OM_METRICS_TEMP_FILE;
 import static org.apache.hadoop.ozone.OzoneConfigKeys.OZONE_ENABLED;
+
 import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_ADDRESS_KEY;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_DEFAULT;
-import static org.apache.hadoop.ozone.om.OMConfigKeys.OZONE_OM_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_HANDLER_COUNT_DEFAULT;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_HANDLER_COUNT_KEY;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_METRICS_SAVE_INTERVAL;
+import static org.apache.hadoop.ozone.om.OMConfigKeys
+    .OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT;
 import static org.apache.hadoop.ozone.protocol.proto.OzoneManagerProtocolProtos.OzoneManagerService.newReflectiveBlockingService;
 import static org.apache.hadoop.util.ExitUtil.terminate;
 
@@ -111,19 +134,28 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
           + StartupOption.INIT.getName() + " ]\n " + "ozone om [ "
           + StartupOption.HELP.getName() + " ]\n";
   private final OzoneConfiguration configuration;
-  private final RPC.Server omRpcServer;
-  private final InetSocketAddress omRpcAddress;
+  private RPC.Server omRpcServer;
+  private InetSocketAddress omRpcAddress;
   private final OMMetadataManager metadataManager;
   private final VolumeManager volumeManager;
   private final BucketManager bucketManager;
   private final KeyManager keyManager;
   private final OMMetrics metrics;
-  private final OzoneManagerHttpServer httpServer;
+  private OzoneManagerHttpServer httpServer;
   private final OMStorage omStorage;
   private final ScmBlockLocationProtocol scmBlockClient;
   private final StorageContainerLocationProtocol scmContainerClient;
   private ObjectName omInfoBeanName;
   private final S3BucketManager s3BucketManager;
+  private Timer metricsTimer;
+  private ScheduleOMMetricsWriteTask scheduleOMMetricsWriteTask;
+  private static final ObjectWriter WRITER =
+      new ObjectMapper().writerWithDefaultPrettyPrinter();
+  private static final ObjectReader READER =
+      new ObjectMapper().readerFor(OmMetricsInfo.class);
+  private static final int SHUTDOWN_HOOK_PRIORITY = 30;
+  private final Runnable shutdownHook;
+  private final File omMetaDir;
 
   private OzoneManager(OzoneConfiguration conf) throws IOException {
     Preconditions.checkNotNull(conf);
@@ -143,33 +175,79 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       throw new OMException("SCM version info mismatch.",
           ResultCodes.SCM_VERSION_MISMATCH_ERROR);
     }
-    final int handlerCount = conf.getInt(OZONE_OM_HANDLER_COUNT_KEY,
-        OZONE_OM_HANDLER_COUNT_DEFAULT);
 
     RPC.setProtocolEngine(configuration, OzoneManagerProtocolPB.class,
         ProtobufRpcEngine.class);
 
-    BlockingService omService = newReflectiveBlockingService(
-        new OzoneManagerProtocolServerSideTranslatorPB(this));
-    final InetSocketAddress omNodeRpcAddr =
-        getOmAddress(configuration);
-    omRpcServer = startRpcServer(configuration, omNodeRpcAddr,
-        OzoneManagerProtocolPB.class, omService,
-        handlerCount);
-    omRpcAddress = updateRPCListenAddress(configuration,
-        OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer);
     metadataManager = new OmMetadataManagerImpl(configuration);
     volumeManager = new VolumeManagerImpl(metadataManager, configuration);
     bucketManager = new BucketManagerImpl(metadataManager);
+    metrics = OMMetrics.create();
+
     s3BucketManager = new S3BucketManagerImpl(configuration, metadataManager,
         volumeManager, bucketManager);
-    metrics = OMMetrics.create();
     keyManager =
         new KeyManagerImpl(scmBlockClient, metadataManager, configuration,
             omStorage.getOmId());
-    httpServer = new OzoneManagerHttpServer(configuration, this);
+
+    shutdownHook = () -> {
+      saveOmMetrics();
+    };
+    ShutdownHookManager.get().addShutdownHook(shutdownHook,
+        SHUTDOWN_HOOK_PRIORITY);
+
+    omMetaDir = OmUtils.getOmDbDir(configuration);
+
+  }
+
+  /**
+   * Class which schedules saving the metrics to a file.
+   */
+  private class ScheduleOMMetricsWriteTask extends TimerTask {
+    public void run() {
+      saveOmMetrics();
+    }
+  }
+
+  private void saveOmMetrics() {
+    try {
+      boolean success;
+      try (BufferedWriter writer = new BufferedWriter(
+          new OutputStreamWriter(new FileOutputStream(
+              getTempMetricsStorageFile()), "UTF-8"))) {
+        OmMetricsInfo metricsInfo = new OmMetricsInfo();
+        metricsInfo.setNumKeys(metrics.getNumKeys());
+        WRITER.writeValue(writer, metricsInfo);
+        success = true;
+      }
+
+      if (success) {
+        Files.move(getTempMetricsStorageFile().toPath(),
+            getMetricsStorageFile().toPath(), StandardCopyOption
+                .ATOMIC_MOVE, StandardCopyOption.REPLACE_EXISTING);
+      }
+    } catch (IOException ex) {
+      LOG.error("Unable to write the om Metrics file", ex);
+    }
+  }
+
+  /**
+   * Returns temporary metrics storage file.
+   * @return File
+   */
+  private File getTempMetricsStorageFile() {
+    return new File(omMetaDir, OM_METRICS_TEMP_FILE);
+  }
+
+  /**
+   * Returns metrics storage file.
+   * @return File
+   */
+  private File getMetricsStorageFile() {
+    return new File(omMetaDir, OM_METRICS_FILE);
   }
 
+
   /**
    * Create a scm block client, used by putKey() and getKey().
    *
@@ -448,12 +526,49 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    * Start service.
    */
   public void start() throws IOException {
+
+    InetSocketAddress omNodeRpcAddr = getOmAddress(configuration);
+    int handlerCount = configuration.getInt(OZONE_OM_HANDLER_COUNT_KEY,
+        OZONE_OM_HANDLER_COUNT_DEFAULT);
+    BlockingService omService = newReflectiveBlockingService(
+        new OzoneManagerProtocolServerSideTranslatorPB(this));
+    omRpcServer = startRpcServer(configuration, omNodeRpcAddr,
+        OzoneManagerProtocolPB.class, omService,
+        handlerCount);
+    omRpcAddress = updateRPCListenAddress(configuration,
+        OZONE_OM_ADDRESS_KEY, omNodeRpcAddr, omRpcServer);
+    omRpcServer.start();
+
     LOG.info(buildRpcServerStartMessage("OzoneManager RPC server",
         omRpcAddress));
+
+
     DefaultMetricsSystem.initialize("OzoneManager");
-    metadataManager.start();
-    keyManager.start();
-    omRpcServer.start();
+
+    metadataManager.start(configuration);
+
+
+    // Set the metrics and start the metrics background thread
+    metrics.setNumVolumes(metadataManager.countRowsInTable(metadataManager
+        .getVolumeTable()));
+    metrics.setNumBuckets(metadataManager.countRowsInTable(metadataManager
+        .getBucketTable()));
+
+    if (getMetricsStorageFile().exists()) {
+      OmMetricsInfo metricsInfo = READER.readValue(getMetricsStorageFile());
+      metrics.setNumKeys(metricsInfo.getNumKeys());
+    }
+
+    // Schedule save metrics
+    long period = configuration.getTimeDuration(OZONE_OM_METRICS_SAVE_INTERVAL,
+        OZONE_OM_METRICS_SAVE_INTERVAL_DEFAULT, TimeUnit.MILLISECONDS);
+    scheduleOMMetricsWriteTask = new ScheduleOMMetricsWriteTask();
+    metricsTimer = new Timer();
+    metricsTimer.schedule(scheduleOMMetricsWriteTask, 0, period);
+
+    keyManager.start(configuration);
+
+    httpServer = new OzoneManagerHttpServer(configuration, this);
     httpServer.start();
     registerMXBean();
     setStartTime();
@@ -464,10 +579,14 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   public void stop() {
     try {
-      metadataManager.stop();
+      // Cancel the metrics timer and set to null.
+      metricsTimer.cancel();
+      metricsTimer = null;
+      scheduleOMMetricsWriteTask = null;
       omRpcServer.stop();
       keyManager.stop();
       httpServer.stop();
+      metadataManager.stop();
       metrics.unRegister();
       unregisterMXBean();
     } catch (Exception e) {
@@ -500,6 +619,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       volumeManager.createVolume(args);
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_VOLUME,
           (args == null) ? null : args.toAuditMap()));
+      metrics.incNumVolumes();
     } catch (Exception ex) {
       metrics.incNumVolumeCreateFails();
       AUDIT.logWriteFailure(
@@ -633,6 +753,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       volumeManager.deleteVolume(volume);
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_VOLUME,
           buildAuditMap(volume)));
+      metrics.decNumVolumes();
     } catch (Exception ex) {
       metrics.incNumVolumeDeleteFails();
       AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_VOLUME,
@@ -727,6 +848,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       bucketManager.createBucket(bucketInfo);
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.CREATE_BUCKET,
           (bucketInfo == null) ? null : bucketInfo.toAuditMap()));
+      metrics.incNumBuckets();
     } catch (Exception ex) {
       metrics.incNumBucketCreateFails();
       AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.CREATE_BUCKET,
@@ -835,6 +957,16 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       keyManager.commitKey(args, clientID);
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.COMMIT_KEY,
           auditMap));
+      // The key becomes visible once it is committed, so increment here.
+      // A key can have multiple versions, so only count it when the version
+      // is 0. Key versioning is not fully supported yet, so this can be
+      // revisited later.
+      if (args != null && args.getLocationInfoList() != null &&
+          args.getLocationInfoList().size() > 0 &&
+          args.getLocationInfoList().get(0) != null &&
+          args.getLocationInfoList().get(0).getCreateVersion() == 0) {
+        metrics.incNumKeys();
+      }
     } catch (Exception ex) {
       metrics.incNumKeyCommitFails();
       AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.COMMIT_KEY,
@@ -925,6 +1057,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       keyManager.deleteKey(args);
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_KEY,
           (args == null) ? null : args.toAuditMap()));
+      metrics.decNumKeys();
     } catch (Exception ex) {
       metrics.incNumKeyDeleteFails();
       AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_KEY,
@@ -998,6 +1131,7 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
       bucketManager.deleteBucket(volume, bucket);
       AUDIT.logWriteSuccess(buildAuditMessageForSuccess(OMAction.DELETE_BUCKET,
           auditMap));
+      metrics.decNumBuckets();
     } catch (Exception ex) {
       metrics.incNumBucketDeleteFails();
       AUDIT.logWriteFailure(buildAuditMessageForFailure(OMAction.DELETE_BUCKET,
@@ -1136,7 +1270,30 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   public void createS3Bucket(String userName, String s3BucketName)
       throws IOException {
-    s3BucketManager.createS3Bucket(userName, s3BucketName);
+    try {
+      metrics.incNumBucketCreates();
+      try {
+        boolean newVolumeCreate = s3BucketManager.createOzoneVolumeIfNeeded(
+            userName);
+        if (newVolumeCreate) {
+          metrics.incNumVolumeCreates();
+          metrics.incNumVolumes();
+        }
+      } catch (IOException ex) {
+        // Increment the volume create counters as well: this is the first
+        // attempt to create the volume and it failed, and the ops/create
+        // counters are incremented whenever the operation is attempted.
+        metrics.incNumVolumeCreates();
+        metrics.incNumVolumeCreateFails();
+        throw ex;
+      }
+
+      s3BucketManager.createS3Bucket(userName, s3BucketName);
+      metrics.incNumBuckets();
+    } catch (IOException ex) {
+      metrics.incNumBucketCreateFails();
+      throw ex;
+    }
   }
 
   @Override
@@ -1145,7 +1302,13 @@ public final class OzoneManager extends ServiceRuntimeInfoImpl
    */
   public void deleteS3Bucket(String s3BucketName)
       throws IOException {
-    s3BucketManager.deleteS3Bucket(s3BucketName);
+    try {
+      metrics.incNumBucketDeletes();
+      s3BucketManager.deleteS3Bucket(s3BucketName);
+      metrics.decNumBuckets();
+    } catch (IOException ex) {
+      metrics.incNumBucketDeleteFails();
+    }
   }
 
   @Override

+ 9 - 0
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManager.java

@@ -69,4 +69,13 @@ public interface S3BucketManager {
    * @param userName
    */
   String getOzoneVolumeNameForUser(String userName) throws IOException;
+
+  /**
+   * Create an Ozone volume if required; this is needed during createS3Bucket.
+   * @param userName
+   * @return true if the volume was successfully created, false if the volume
+   * already exists.
+   * @throws IOException in case of volume creation failure.
+   */
+  boolean createOzoneVolumeIfNeeded(String userName) throws IOException;
 }

+ 8 - 9
hadoop-ozone/ozone-manager/src/main/java/org/apache/hadoop/ozone/om/S3BucketManagerImpl.java

@@ -95,14 +95,7 @@ public class S3BucketManagerImpl implements S3BucketManager {
     // You might wonder if all names map to this pattern, why we need to
     // store the S3 bucketName in a table at all. This is to support
     // anonymous access to bucket where the user name is absent.
-
-    // About Locking:
-    // We need to do this before we take the S3Bucket Lock since createVolume
-    // takes the userLock. So an attempt to take the user lock while holding
-    // S3Bucket lock will throw, so we need to create the volume if needed
-    // before we execute the bucket mapping functions.
     String ozoneVolumeName = formatOzoneVolumeName(userName);
-    createOzoneVolumeIfNeeded(userName, ozoneVolumeName);
 
     omMetadataManager.getLock().acquireS3Lock(bucketName);
     try {
@@ -157,20 +150,24 @@ public class S3BucketManagerImpl implements S3BucketManager {
     return String.format(OM_S3_VOLUME_PREFIX + "%s", userName);
   }
 
-  private void createOzoneVolumeIfNeeded(String userName, String volumeName)
+  @Override
+  public boolean createOzoneVolumeIfNeeded(String userName)
       throws IOException {
     // We don't have to time of check. time of use problem here because
     // this call is invoked while holding the s3Bucket lock.
+    boolean newVolumeCreate = true;
+    String ozoneVolumeName = formatOzoneVolumeName(userName);
     try {
       OmVolumeArgs args =
           OmVolumeArgs.newBuilder()
               .setAdminName(S3_ADMIN_NAME)
               .setOwnerName(userName)
-              .setVolume(volumeName)
+              .setVolume(ozoneVolumeName)
               .setQuotaInBytes(OzoneConsts.MAX_QUOTA_IN_BYTES)
               .build();
       volumeManager.createVolume(args);
     } catch (OMException exp) {
+      newVolumeCreate = false;
       if (exp.getResult().compareTo(FAILED_VOLUME_ALREADY_EXISTS) == 0) {
         if (LOG.isDebugEnabled()) {
           LOG.debug("Volume already exists. {}", exp.getMessage());
@@ -179,6 +176,8 @@ public class S3BucketManagerImpl implements S3BucketManager {
         throw exp;
       }
     }
+
+    return newVolumeCreate;
   }
 
   private void createOzoneBucket(String volumeName, String bucketName)

+ 2 - 5
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestKeyDeletingService.java

@@ -95,9 +95,6 @@ public class TestKeyDeletingService {
     createAndDeleteKeys(keyManager, keyCount, 1);
     KeyDeletingService keyDeletingService =
         (KeyDeletingService) keyManager.getDeletingService();
-    keyManager.start();
-    Assert.assertEquals(
-        keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), keyCount);
     GenericTestUtils.waitFor(
         () -> keyDeletingService.getDeletedKeyCount().get() >= keyCount,
         1000, 10000);
@@ -120,7 +117,7 @@ public class TestKeyDeletingService {
     createAndDeleteKeys(keyManager, keyCount, 1);
     KeyDeletingService keyDeletingService =
         (KeyDeletingService) keyManager.getDeletingService();
-    keyManager.start();
+    keyManager.start(conf);
     Assert.assertEquals(
         keyManager.getPendingDeletionKeys(Integer.MAX_VALUE).size(), keyCount);
     // Make sure that we have run the background thread 5 times more
@@ -147,7 +144,7 @@ public class TestKeyDeletingService {
     createAndDeleteKeys(keyManager, keyCount, 0);
     KeyDeletingService keyDeletingService =
         (KeyDeletingService) keyManager.getDeletingService();
-    keyManager.start();
+    keyManager.start(conf);
 
     // Since empty keys are directly deleted from db there should be no
     // pending deletion keys. Also deletedKeyCount should be zero.

+ 5 - 0
hadoop-ozone/ozone-manager/src/test/java/org/apache/hadoop/ozone/om/TestS3BucketManager.java

@@ -64,6 +64,7 @@ public class TestS3BucketManager {
   public void testCreateS3Bucket() throws IOException {
     S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
         volumeManager, bucketManager);
+    s3BucketManager.createOzoneVolumeIfNeeded("bilbo");
     s3BucketManager.createS3Bucket("bilbo", "bucket");
 
     // This call should have created a ozone volume called s3bilbo and bucket
@@ -104,6 +105,7 @@ public class TestS3BucketManager {
   public void testDeleteS3Bucket() throws IOException {
     S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
         volumeManager, bucketManager);
+    s3BucketManager.createOzoneVolumeIfNeeded("ozone");
     s3BucketManager.createS3Bucket("ozone", "s3bucket");
 
     // This call should have created a ozone volume called s3ozone and bucket
@@ -122,6 +124,7 @@ public class TestS3BucketManager {
   public void testGetS3BucketMapping() throws IOException {
     S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
         volumeManager, bucketManager);
+    s3BucketManager.createOzoneVolumeIfNeeded("bilbo");
     s3BucketManager.createS3Bucket("bilbo", "newBucket");
     String mapping = s3BucketManager.getOzoneBucketMapping("newBucket");
     Assert.assertTrue(mapping.startsWith("s3bilbo/"));
@@ -132,6 +135,7 @@ public class TestS3BucketManager {
   public void testGetOzoneNames() throws IOException {
     S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
         volumeManager, bucketManager);
+    s3BucketManager.createOzoneVolumeIfNeeded("batman");
     s3BucketManager.createS3Bucket("batman", "gotham");
     String volumeName = s3BucketManager.getOzoneVolumeName("gotham");
     Assert.assertTrue(volumeName.equalsIgnoreCase("s3batman"));
@@ -150,6 +154,7 @@ public class TestS3BucketManager {
   public void testBucketNameAreUnique() throws IOException {
     S3BucketManager s3BucketManager = new S3BucketManagerImpl(conf, metaMgr,
         volumeManager, bucketManager);
+    s3BucketManager.createOzoneVolumeIfNeeded("superman");
     s3BucketManager.createS3Bucket("superman", "metropolis");
     // recreating the same bucket  even with a different user will throw.
     thrown.expectMessage("Unable to create S3 bucket.");