
HDDS-1044. Client doesn't propagate correct error code to client on out of disk space. Contributed by Yiqun Lin.

Bharat Viswanadham, 6 years ago
Commit 912d9f790d
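
Summary: when a WriteChunk or PutSmallFile request triggers implicit container creation and that creation fails (for example with DISK_OUT_OF_SPACE), HddsDispatcher now returns the create-container result to the client instead of continuing with the write and surfacing a less specific failure. The following is a minimal, self-contained sketch of that pattern; the Result and Response types here are stand-ins for the real ContainerProtos types and are not part of this patch.

// Sketch only (not Ozone's actual classes): shows how a failed implicit
// container create is propagated back instead of being swallowed.
public class ErrorPropagationSketch {

  // Stand-ins for ContainerProtos.Result / ContainerCommandResponseProto.
  enum Result { SUCCESS, DISK_OUT_OF_SPACE }

  static final class Response {
    private final Result result;
    Response(Result result) { this.result = result; }
    Result getResult() { return result; }
  }

  // Mirrors the dispatch-side check added by this patch: if the create step
  // does not succeed, return its result to the caller right away.
  static Response dispatchWriteChunk(long containerID, Response createResponse) {
    if (createResponse.getResult() != Result.SUCCESS) {
      System.err.println("ContainerID " + containerID + " creation failed : "
          + "Result: " + createResponse.getResult());
      return createResponse;
    }
    // ... the normal write path would continue here ...
    return new Response(Result.SUCCESS);
  }

  public static void main(String[] args) {
    Response response =
        dispatchWriteChunk(1L, new Response(Result.DISK_OUT_OF_SPACE));
    // Prints DISK_OUT_OF_SPACE: the caller now sees the original error code.
    System.out.println(response.getResult());
  }
}
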

+ 14 - 3
hadoop-hdds/container-service/src/main/java/org/apache/hadoop/ozone/container/common/impl/HddsDispatcher.java

@@ -160,7 +160,15 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
           || cmdType == ContainerProtos.Type.PutSmallFile)) {
         // If container does not exist, create one for WriteChunk and
         // PutSmallFile request
-        createContainer(msg);
+        responseProto = createContainer(msg);
+        if (responseProto.getResult() != Result.SUCCESS) {
+          StorageContainerException sce = new StorageContainerException(
+              "ContainerID " + containerID + " creation failed",
+              responseProto.getResult());
+          audit(action, eventType, params, AuditEventStatus.FAILURE, sce);
+          return ContainerUtils.logAndReturnError(LOG, sce, msg);
+        }
+
         container = getContainer(containerID);
       }
 
@@ -250,8 +258,11 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
    * Create a container using the input container request.
    * @param containerRequest - the container request which requires container
    *                         to be created.
+   * @return ContainerCommandResponseProto container command response.
    */
-  private void createContainer(ContainerCommandRequestProto containerRequest) {
+  @VisibleForTesting
+  ContainerCommandResponseProto createContainer(
+      ContainerCommandRequestProto containerRequest) {
     ContainerProtos.CreateContainerRequestProto.Builder createRequest =
         ContainerProtos.CreateContainerRequestProto.newBuilder();
     ContainerType containerType =
@@ -270,7 +281,7 @@ public class HddsDispatcher implements ContainerDispatcher, Auditor {
     // TODO: Assuming the container type to be KeyValueContainer for now.
     // We need to get container type from the containerRequest.
     Handler handler = getHandler(containerType);
-    handler.handle(requestBuilder.build(), null, null);
+    return handler.handle(requestBuilder.build(), null, null);
   }
 
   /**
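Design note: createContainer now returns the ContainerCommandResponseProto instead of dropping it, so dispatch can inspect the result and short-circuit with the original error code. Widening it to package-private with @VisibleForTesting also lets the unit test below stub the create step directly.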

+ 69 - 17
hadoop-hdds/container-service/src/test/java/org/apache/hadoop/ozone/container/common/impl/TestHddsDispatcher.java

@@ -38,6 +38,7 @@ import org.apache.hadoop.hdds.protocol.proto
     .StorageContainerDatanodeProtocolProtos.ContainerAction;
 import org.apache.hadoop.ozone.common.Checksum;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerMetrics;
+import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
 import org.apache.hadoop.ozone.container.common.interfaces.Container;
 import org.apache.hadoop.ozone.container.common.interfaces.Handler;
 import org.apache.hadoop.ozone.container.common.statemachine.DatanodeStateMachine;
@@ -59,6 +60,7 @@ import java.util.UUID;
 
 import static java.nio.charset.StandardCharsets.UTF_8;
 import static org.apache.hadoop.hdds.scm.ScmConfigKeys.HDDS_DATANODE_DIR_KEY;
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.times;
 import static org.mockito.Mockito.verify;
 
@@ -130,23 +132,7 @@ public class TestHddsDispatcher {
       OzoneConfiguration conf = new OzoneConfiguration();
       conf.set(HDDS_DATANODE_DIR_KEY, testDir);
       DatanodeDetails dd = randomDatanodeDetails();
-      ContainerSet containerSet = new ContainerSet();
-      VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
-      DatanodeStateMachine stateMachine = Mockito.mock(
-          DatanodeStateMachine.class);
-      StateContext context = Mockito.mock(StateContext.class);
-      Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
-      Mockito.when(context.getParent()).thenReturn(stateMachine);
-      ContainerMetrics metrics = ContainerMetrics.create(conf);
-      Map<ContainerType, Handler> handlers = Maps.newHashMap();
-      for (ContainerType containerType : ContainerType.values()) {
-        handlers.put(containerType,
-            Handler.getHandlerForContainerType(containerType, conf, context,
-                containerSet, volumeSet, metrics));
-      }
-      HddsDispatcher hddsDispatcher = new HddsDispatcher(
-          conf, containerSet, volumeSet, handlers, context, metrics);
-      hddsDispatcher.setScmId(scmId.toString());
+      HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
       ContainerCommandRequestProto writeChunkRequest =
           getWriteChunkRequest(dd.getUuidString(), 1L, 1L);
       // send read chunk request and make sure container does not exist
@@ -169,6 +155,72 @@ public class TestHddsDispatcher {
     }
   }
 
+  @Test
+  public void testWriteChunkWithCreateContainerFailure() throws IOException {
+    String testDir = GenericTestUtils.getTempPath(
+        TestHddsDispatcher.class.getSimpleName());
+    try {
+      UUID scmId = UUID.randomUUID();
+      OzoneConfiguration conf = new OzoneConfiguration();
+      conf.set(HDDS_DATANODE_DIR_KEY, testDir);
+      DatanodeDetails dd = randomDatanodeDetails();
+      HddsDispatcher hddsDispatcher = createDispatcher(dd, scmId, conf);
+      ContainerCommandRequestProto writeChunkRequest = getWriteChunkRequest(
+          dd.getUuidString(), 1L, 1L);
+
+      HddsDispatcher mockDispatcher = Mockito.spy(hddsDispatcher);
+      ContainerCommandResponseProto.Builder builder = ContainerUtils
+          .getContainerCommandResponse(writeChunkRequest,
+              ContainerProtos.Result.DISK_OUT_OF_SPACE, "");
+      // Return a DISK_OUT_OF_SPACE response when the write chunk request
+      // triggers implicit container creation.
+      Mockito.doReturn(builder.build()).when(mockDispatcher)
+          .createContainer(writeChunkRequest);
+
+      GenericTestUtils.LogCapturer logCapturer = GenericTestUtils.LogCapturer
+          .captureLogs(HddsDispatcher.LOG);
+      // send write chunk request without sending create container
+      mockDispatcher.dispatch(writeChunkRequest, null);
+      // verify the error log
+      assertTrue(logCapturer.getOutput()
+          .contains("ContainerID " + writeChunkRequest.getContainerID()
+              + " creation failed : Result: DISK_OUT_OF_SPACE"));
+    } finally {
+      FileUtils.deleteDirectory(new File(testDir));
+    }
+  }
+
+  /**
+   * Creates an HddsDispatcher instance with the given parameters.
+   * @param dd datanode details.
+   * @param scmId SCM UUID.
+   * @param conf configuration to be used.
+   * @return the created HddsDispatcher instance.
+   * @throws IOException
+   */
+  private HddsDispatcher createDispatcher(DatanodeDetails dd, UUID scmId,
+      OzoneConfiguration conf) throws IOException {
+    ContainerSet containerSet = new ContainerSet();
+    VolumeSet volumeSet = new VolumeSet(dd.getUuidString(), conf);
+    DatanodeStateMachine stateMachine = Mockito.mock(
+        DatanodeStateMachine.class);
+    StateContext context = Mockito.mock(StateContext.class);
+    Mockito.when(stateMachine.getDatanodeDetails()).thenReturn(dd);
+    Mockito.when(context.getParent()).thenReturn(stateMachine);
+    ContainerMetrics metrics = ContainerMetrics.create(conf);
+    Map<ContainerType, Handler> handlers = Maps.newHashMap();
+    for (ContainerType containerType : ContainerType.values()) {
+      handlers.put(containerType,
+          Handler.getHandlerForContainerType(containerType, conf, context,
+              containerSet, volumeSet, metrics));
+    }
+
+    HddsDispatcher hddsDispatcher = new HddsDispatcher(
+        conf, containerSet, volumeSet, handlers, context, metrics);
+    hddsDispatcher.setScmId(scmId.toString());
+    return hddsDispatcher;
+  }
+
   // This method has to be removed once we move scm/TestUtils.java
   // from server-scm project to container-service or to common project.
   private static DatanodeDetails randomDatanodeDetails() {
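
Usage note on the new test: it wraps the dispatcher in a Mockito spy, stubs createContainer to return a DISK_OUT_OF_SPACE response, dispatches a WriteChunk request without a prior create-container call, and asserts via GenericTestUtils.LogCapturer that the propagated DISK_OUT_OF_SPACE result appears in the dispatcher's error log.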