Browse Source

HDFS-10351. Ozone: Optimize key writes to chunks by providing a bulk write implementation in ChunkOutputStream. Contributed by Chris Nauroth.

Anu Engineer 9 years ago
parent
commit
183175e60f

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/localstorage/OzoneMetadataManager.java

@@ -380,8 +380,6 @@ public final class OzoneMetadataManager {
   public ListVolumes listVolumes(ListArgs args) throws OzoneException {
     lock.readLock().lock();
     try {
-      Preconditions.checkState(args.getArgs() instanceof UserArgs);
-
       if (args.isRootScan()) {
         return listAllVolumes(args);
       }

+ 26 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/ozone/web/storage/ChunkOutputStream.java

@@ -100,6 +100,32 @@ class ChunkOutputStream extends OutputStream {
     }
   }
 
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {  // bulk write: buffers bytes and flushes whole chunks of CHUNK_SIZE
+    if (b == null) {
+      throw new NullPointerException();  // per OutputStream.write(byte[],int,int) contract
+    }
+    if ((off < 0) || (off > b.length) || (len < 0) ||
+        ((off + len) > b.length) || ((off + len) < 0)) {  // last term guards against int overflow of off + len
+      throw new IndexOutOfBoundsException();
+    }
+    if (len == 0) {
+      return;  // zero-length write is a no-op per the OutputStream contract
+    }
+    checkOpen();  // presumably throws IOException if the stream is already closed — confirm against checkOpen()
+    while (len > 0) {
+      int writeLen = Math.min(CHUNK_SIZE - buffer.position(), len);  // copy at most the remaining capacity of the current chunk
+      int rollbackPosition = buffer.position();  // NOTE(review): captured so flushBufferToChunk can restore state on failure — confirm against its implementation
+      int rollbackLimit = buffer.limit();
+      buffer.put(b, off, writeLen);
+      if (buffer.position() == CHUNK_SIZE) {  // buffer holds a full chunk: write it out before accepting more bytes
+        flushBufferToChunk(rollbackPosition, rollbackLimit);
+      }
+      off += writeLen;  // advance past the bytes just consumed
+      len -= writeLen;
+    }
+  }
+
   @Override
   public synchronized void flush() throws IOException {
     checkOpen();