@@ -26,8 +26,10 @@ import java.security.MessageDigest;
import java.util.HashMap;
import java.util.Map;
import java.util.Random;
+import java.util.concurrent.CompletableFuture;

import com.google.common.base.Charsets;
+import org.assertj.core.api.Assertions;
import org.junit.Assume;
import org.junit.Test;
import org.slf4j.Logger;
@@ -35,22 +37,31 @@ import org.slf4j.LoggerFactory;

import org.apache.commons.codec.digest.DigestUtils;
import org.apache.commons.io.IOUtils;
-import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BBUploadHandle;
+import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.MultipartUploader;
-import org.apache.hadoop.fs.MultipartUploaderFactory;
import org.apache.hadoop.fs.PartHandle;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.PathHandle;
import org.apache.hadoop.fs.UploadHandle;
+import org.apache.hadoop.test.LambdaTestUtils;
+import org.apache.hadoop.util.DurationInfo;

import static org.apache.hadoop.fs.contract.ContractTestUtils.verifyPathExists;
+import static org.apache.hadoop.fs.impl.FutureIOSupport.awaitFuture;
import static org.apache.hadoop.io.IOUtils.cleanupWithLogger;
import static org.apache.hadoop.test.LambdaTestUtils.eventually;
import static org.apache.hadoop.test.LambdaTestUtils.intercept;

+/**
+ * Tests of multipart uploads.
+ * <p></p>
+ * <i>Note</i>: some of the tests pick one of the two available
+ * uploaders at random. If tests fail intermittently,
+ * it may be because different uploaders are being selected.
+ */
public abstract class AbstractContractMultipartUploaderTest extends
    AbstractFSContractTestBase {

@@ -63,36 +74,44 @@ public abstract class AbstractContractMultipartUploaderTest extends
   */
  protected static final int SMALL_FILE = 100;

-  private MultipartUploader mpu;
-  private MultipartUploader mpu2;
+  protected static final int CONSISTENCY_INTERVAL = 1000;
+
+  private MultipartUploader uploader0;
+  private MultipartUploader uploader1;
  private final Random random = new Random();
  private UploadHandle activeUpload;
  private Path activeUploadPath;

-  protected String getMethodName() {
-    return methodName.getMethodName();
-  }
-
  @Override
  public void setup() throws Exception {
    super.setup();
-    Configuration conf = getContract().getConf();
-    mpu = MultipartUploaderFactory.get(getFileSystem(), conf);
-    mpu2 = MultipartUploaderFactory.get(getFileSystem(), conf);
+
+    final FileSystem fs = getFileSystem();
+    Path testPath = getContract().getTestPath();
+    uploader0 = fs.createMultipartUploader(testPath).build();
+    uploader1 = fs.createMultipartUploader(testPath).build();
  }

  @Override
  public void teardown() throws Exception {
-    if (mpu!= null && activeUpload != null) {
+    MultipartUploader uploader = getUploader(1);
+    if (uploader != null) {
+      if (activeUpload != null) {
+        abortUploadQuietly(activeUpload, activeUploadPath);
+      }
      try {
-        mpu.abort(activeUploadPath, activeUpload);
-      } catch (FileNotFoundException ignored) {
-        /* this is fine */
+        // round off with an abort of all uploads
+        Path teardown = getContract().getTestPath();
+        LOG.info("Teardown: aborting outstanding uploads under {}", teardown);
+        CompletableFuture<Integer> f
+            = uploader.abortUploadsUnderPath(teardown);
+        f.get();
      } catch (Exception e) {
-        LOG.info("in teardown", e);
+        LOG.warn("Exception in teardown", e);
      }
    }
-    cleanupWithLogger(LOG, mpu, mpu2);
+
+    cleanupWithLogger(LOG, uploader0, uploader1);
    super.teardown();
  }

@@ -192,16 +211,16 @@ public abstract class AbstractContractMultipartUploaderTest extends
   * @param index index of upload
   * @return an uploader
   */
-  protected MultipartUploader mpu(int index) {
-    return (index % 2 == 0) ? mpu : mpu2;
+  protected MultipartUploader getUploader(int index) {
+    return (index % 2 == 0) ? uploader0 : uploader1;
  }

  /**
   * Pick a multipart uploader at random.
   * @return an uploader
   */
-  protected MultipartUploader randomMpu() {
-    return mpu(random.nextInt(10));
+  protected MultipartUploader getRandomUploader() {
+    return getUploader(random.nextInt(10));
  }

  /**
@@ -211,39 +230,71 @@ public abstract class AbstractContractMultipartUploaderTest extends
  @Test
  public void testSingleUpload() throws Exception {
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
    Map<Integer, PartHandle> partHandles = new HashMap<>();
    MessageDigest origDigest = DigestUtils.getMd5Digest();
    int size = SMALL_FILE;
    byte[] payload = generatePayload(1, size);
    origDigest.update(payload);
+    // use a single uploader
+    // note: the same uploader is used here as it found a bug in the S3Guard
+    // DDB bulk operation state upload - the previous operation had
+    // added an entry to the ongoing state; this second call
+    // was interpreted as an inconsistent write.
+    MultipartUploader completer = uploader0;
+    // and upload with uploader 1 to validate cross-uploader uploads
    PartHandle partHandle = putPart(file, uploadHandle, 1, payload);
    partHandles.put(1, partHandle);
-    PathHandle fd = completeUpload(file, uploadHandle, partHandles,
-        origDigest,
-        size);
+    PathHandle fd = complete(completer, uploadHandle, file,
+        partHandles);
+
+    validateUpload(file, origDigest, size);

+    // verify that if the implementation processes data immediately
+    // then a second attempt at the upload will fail.
    if (finalizeConsumesUploadIdImmediately()) {
      intercept(FileNotFoundException.class,
-          () -> mpu.complete(file, partHandles, uploadHandle));
+          () -> complete(completer, uploadHandle, file, partHandles));
    } else {
-      PathHandle fd2 = mpu.complete(file, partHandles, uploadHandle);
+      // otherwise, the same or other uploader can try again.
+      PathHandle fd2 = complete(completer, uploadHandle, file, partHandles);
      assertArrayEquals("Path handles differ", fd.toByteArray(),
          fd2.toByteArray());
    }
  }

  /**
-   * Initialize an upload.
+   * Complete IO for a specific uploader; await the response.
+   * @param uploader uploader
+   * @param uploadHandle Identifier
+   * @param file Target path for upload
+   * @param partHandles handles map of part number to part handle
+   * @return unique PathHandle identifier for the uploaded file.
+   */
+  protected PathHandle complete(
+      final MultipartUploader uploader,
+      final UploadHandle uploadHandle,
+      final Path file,
+      final Map<Integer, PartHandle> partHandles)
+      throws IOException {
+    try (DurationInfo d =
+        new DurationInfo(LOG, "Complete upload to %s", file)) {
+      return awaitFuture(
+          uploader.complete(uploadHandle, file, partHandles));
+    }
+  }
+
+  /**
+   * Start an upload.
   * This saves the path and upload handle as the active
   * upload, for aborting in teardown
   * @param dest destination
   * @return the handle
   * @throws IOException failure to initialize
   */
-  protected UploadHandle initializeUpload(final Path dest) throws IOException {
+  protected UploadHandle startUpload(final Path dest) throws IOException {
    activeUploadPath = dest;
-    activeUpload = randomMpu().initialize(dest);
+    activeUpload = awaitFuture(getRandomUploader().startUpload(dest));
    return activeUpload;
  }

@@ -283,12 +334,17 @@ public abstract class AbstractContractMultipartUploaderTest extends
      final int index,
      final byte[] payload) throws IOException {
    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-    PartHandle partHandle = mpu(index)
-        .putPart(file,
-            new ByteArrayInputStream(payload),
-            index,
-            uploadHandle,
-            payload.length);
+    PartHandle partHandle;
+    try (DurationInfo d =
+        new DurationInfo(LOG, "Put part %d (size %s) %s",
+        index,
+        payload.length,
+        file)) {
+      partHandle = awaitFuture(getUploader(index)
+          .putPart(uploadHandle, index, file,
+              new ByteArrayInputStream(payload),
+              payload.length));
+    }
    timer.end("Uploaded part %s", index);
    LOG.info("Upload bandwidth {} MB/s",
        timer.bandwidthDescription(payload.length));
@@ -296,7 +352,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
  }

  /**
-   * Complete an upload with the active MPU instance.
+   * Complete an upload with a random uploader.
   * @param file destination
   * @param uploadHandle handle
   * @param partHandles map of handles
@@ -312,36 +368,64 @@ public abstract class AbstractContractMultipartUploaderTest extends
      final int expectedLength) throws IOException {
    PathHandle fd = complete(file, uploadHandle, partHandles);

-    FileStatus status = verifyPathExists(getFileSystem(),
-        "Completed file", file);
-    assertEquals("length of " + status,
-        expectedLength, status.getLen());
+    validateUpload(file, origDigest, expectedLength);
+    return fd;
+  }
+
+  /**
+   * Validate a completed upload.
+   * @param file destination
+   * @param origDigest digest of source data (may be null)
+   * @param expectedLength expected length of result.
+   * @throws IOException IO failure
+   */
+  private void validateUpload(final Path file,
+      final MessageDigest origDigest,
+      final int expectedLength) throws IOException {
+    verifyPathExists(getFileSystem(),
+        "Completed file", file);
+    verifyFileLength(file, expectedLength);

    if (origDigest != null) {
      verifyContents(file, origDigest, expectedLength);
    }
-    return fd;
  }

  /**
   * Verify the contents of a file.
   * @param file path
   * @param origDigest digest
-   * @param expectedLength expected length (for logging B/W)
+   * @param expectedLength expected length (for logging download bandwidth)
   * @throws IOException IO failure
   */
  protected void verifyContents(final Path file,
      final MessageDigest origDigest,
      final int expectedLength) throws IOException {
    ContractTestUtils.NanoTimer timer2 = new ContractTestUtils.NanoTimer();
-    assertArrayEquals("digest of source and " + file
-        + " differ",
-        origDigest.digest(), digest(file));
+    Assertions.assertThat(digest(file))
+        .describedAs("digest of uploaded file %s", file)
+        .isEqualTo(origDigest.digest());
    timer2.end("Completed digest", file);
    LOG.info("Download bandwidth {} MB/s",
        timer2.bandwidthDescription(expectedLength));
  }

+  /**
+   * Verify the length of a file.
+   * @param file path
+   * @param expectedLength expected length
+   * @throws IOException IO failure
+   */
+  private void verifyFileLength(final Path file, final long expectedLength)
+      throws IOException {
+    FileStatus st = getFileSystem().getFileStatus(file);
+    Assertions.assertThat(st)
+        .describedAs("Uploaded file %s", st)
+        .matches(FileStatus::isFile)
+        .extracting(FileStatus::getLen)
+        .isEqualTo(expectedLength);
+  }
+
  /**
   * Perform the inner complete without verification.
   * @param file destination path
@@ -353,21 +437,37 @@ public abstract class AbstractContractMultipartUploaderTest extends
  private PathHandle complete(final Path file,
      final UploadHandle uploadHandle,
      final Map<Integer, PartHandle> partHandles) throws IOException {
-    ContractTestUtils.NanoTimer timer = new ContractTestUtils.NanoTimer();
-    PathHandle fd = randomMpu().complete(file, partHandles, uploadHandle);
-    timer.end("Completed upload to %s", file);
-    return fd;
+    return complete(getRandomUploader(), uploadHandle, file,
+        partHandles);
  }

  /**
   * Abort an upload.
-   * @param file path
   * @param uploadHandle handle
+   * @param file path
   * @throws IOException failure
   */
-  private void abortUpload(final Path file, UploadHandle uploadHandle)
+  private void abortUpload(UploadHandle uploadHandle,
+      final Path file)
      throws IOException {
-    randomMpu().abort(file, uploadHandle);
+    try (DurationInfo d =
+        new DurationInfo(LOG, "Abort upload to %s", file)) {
+      awaitFuture(getRandomUploader().abort(uploadHandle, file));
+    }
+  }
+
+  /**
+   * Abort an upload; swallows exceptions.
+   * @param uploadHandle handle
+   * @param file path
+   */
+  private void abortUploadQuietly(UploadHandle uploadHandle, Path file) {
+    try {
+      abortUpload(uploadHandle, file);
+    } catch (FileNotFoundException ignored) {
+    } catch (Exception e) {
+      LOG.info("aborting {}: {}", file, e.toString());
+    }
  }

  /**
@@ -377,10 +477,10 @@ public abstract class AbstractContractMultipartUploaderTest extends
  @Test
  public void testMultipartUpload() throws Exception {
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
    Map<Integer, PartHandle> partHandles = new HashMap<>();
    MessageDigest origDigest = DigestUtils.getMd5Digest();
-    final int payloadCount = getTestPayloadCount();
+    int payloadCount = getTestPayloadCount();
    for (int i = 1; i <= payloadCount; ++i) {
      PartHandle partHandle = buildAndPutPart(file, uploadHandle, i,
          origDigest);
@@ -400,16 +500,16 @@ public abstract class AbstractContractMultipartUploaderTest extends
    FileSystem fs = getFileSystem();
    Path file = path("testMultipartUpload");
    try (MultipartUploader uploader =
-        MultipartUploaderFactory.get(fs, null)) {
-      UploadHandle uploadHandle = uploader.initialize(file);
+        fs.createMultipartUploader(file).build()) {
+      UploadHandle uploadHandle = uploader.startUpload(file).get();

      Map<Integer, PartHandle> partHandles = new HashMap<>();
      MessageDigest origDigest = DigestUtils.getMd5Digest();
      byte[] payload = new byte[0];
      origDigest.update(payload);
      InputStream is = new ByteArrayInputStream(payload);
-      PartHandle partHandle = uploader.putPart(file, is, 1, uploadHandle,
-          payload.length);
+      PartHandle partHandle = awaitFuture(
+          uploader.putPart(uploadHandle, 1, file, is, payload.length));
      partHandles.put(1, partHandle);
      completeUpload(file, uploadHandle, partHandles, origDigest, 0);
    }
@@ -422,7 +522,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
  @Test
  public void testUploadEmptyBlock() throws Exception {
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
    Map<Integer, PartHandle> partHandles = new HashMap<>();
    partHandles.put(1, putPart(file, uploadHandle, 1, new byte[0]));
    completeUpload(file, uploadHandle, partHandles, null, 0);
@@ -435,10 +535,10 @@ public abstract class AbstractContractMultipartUploaderTest extends
  @Test
  public void testMultipartUploadReverseOrder() throws Exception {
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
    Map<Integer, PartHandle> partHandles = new HashMap<>();
    MessageDigest origDigest = DigestUtils.getMd5Digest();
-    final int payloadCount = getTestPayloadCount();
+    int payloadCount = getTestPayloadCount();
    for (int i = 1; i <= payloadCount; ++i) {
      byte[] payload = generatePayload(i);
      origDigest.update(payload);
@@ -459,7 +559,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
      throws Exception {
    describe("Upload in reverse order and the part numbers are not contiguous");
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
    MessageDigest origDigest = DigestUtils.getMd5Digest();
    int payloadCount = 2 * getTestPayloadCount();
    for (int i = 2; i <= payloadCount; i += 2) {
@@ -482,22 +582,22 @@ public abstract class AbstractContractMultipartUploaderTest extends
  public void testMultipartUploadAbort() throws Exception {
    describe("Upload and then abort it before completing");
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
-    int end = 10;
+    UploadHandle uploadHandle = startUpload(file);
    Map<Integer, PartHandle> partHandles = new HashMap<>();
    for (int i = 12; i > 10; i--) {
      partHandles.put(i, buildAndPutPart(file, uploadHandle, i, null));
    }
-    abortUpload(file, uploadHandle);
+    abortUpload(uploadHandle, file);

    String contents = "ThisIsPart49\n";
    int len = contents.getBytes(Charsets.UTF_8).length;
    InputStream is = IOUtils.toInputStream(contents, "UTF-8");

    intercept(IOException.class,
-        () -> mpu.putPart(file, is, 49, uploadHandle, len));
+        () -> awaitFuture(
+            uploader0.putPart(uploadHandle, 49, file, is, len)));
    intercept(IOException.class,
-        () -> mpu.complete(file, partHandles, uploadHandle));
+        () -> complete(uploader0, uploadHandle, file, partHandles));

    assertPathDoesNotExist("Uploaded file should not exist", file);

@@ -505,9 +605,9 @@ public abstract class AbstractContractMultipartUploaderTest extends
    // consumed by finalization operations (complete, abort).
    if (finalizeConsumesUploadIdImmediately()) {
      intercept(FileNotFoundException.class,
-          () -> abortUpload(file, uploadHandle));
+          () -> abortUpload(uploadHandle, file));
    } else {
-      abortUpload(file, uploadHandle);
+      abortUpload(uploadHandle, file);
    }
  }

@@ -519,31 +619,55 @@ public abstract class AbstractContractMultipartUploaderTest extends
    Path file = methodPath();
    ByteBuffer byteBuffer = ByteBuffer.wrap(
        "invalid-handle".getBytes(Charsets.UTF_8));
-    UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer);
    intercept(FileNotFoundException.class,
-        () -> abortUpload(file, uploadHandle));
+        () -> abortUpload(BBUploadHandle.from(byteBuffer), file));
  }

  /**
-   * Trying to abort with a handle of size 0 must fail.
+   * Trying to abort an upload with no data does not create a file.
   */
  @Test
  public void testAbortEmptyUpload() throws Exception {
    describe("initialize upload and abort before uploading data");
    Path file = methodPath();
-    abortUpload(file, initializeUpload(file));
+    abortUpload(startUpload(file), file);
    assertPathDoesNotExist("Uploaded file should not exist", file);
  }

+
+  /**
+   * Aborting all pending uploads under a path aborts this upload too.
+   */
+  @Test
+  public void testAbortAllPendingUploads() throws Exception {
+    describe("initialize upload and abort the pending upload");
+    Path path = methodPath();
+    Path file = new Path(path, "child");
+    UploadHandle upload = startUpload(file);
+    try {
+      CompletableFuture<Integer> oF
+          = getRandomUploader().abortUploadsUnderPath(path.getParent());
+      int abortedUploads = awaitFuture(oF);
+      if (abortedUploads >= 0) {
+        // uploads can be aborted
+        Assertions.assertThat(abortedUploads)
+            .describedAs("Number of uploads aborted")
+            .isGreaterThanOrEqualTo(1);
+        assertPathDoesNotExist("Uploaded file should not exist", file);
+      }
+    } finally {
+      abortUploadQuietly(upload, file);
+    }
+  }
+
  /**
   * Trying to abort with a handle of size 0 must fail.
   */
  @Test
  public void testAbortEmptyUploadHandle() throws Exception {
    ByteBuffer byteBuffer = ByteBuffer.wrap(new byte[0]);
-    UploadHandle uploadHandle = BBUploadHandle.from(byteBuffer);
    intercept(IllegalArgumentException.class,
-        () -> abortUpload(methodPath(), uploadHandle));
+        () -> abortUpload(BBUploadHandle.from(byteBuffer), methodPath()));
  }

  /**
@@ -553,10 +677,10 @@ public abstract class AbstractContractMultipartUploaderTest extends
  public void testCompleteEmptyUpload() throws Exception {
    describe("Expect an empty MPU to fail, but still be abortable");
    Path dest = methodPath();
-    UploadHandle handle = initializeUpload(dest);
+    UploadHandle handle = startUpload(dest);
    intercept(IllegalArgumentException.class,
-        () -> mpu.complete(dest, new HashMap<>(), handle));
-    abortUpload(dest, handle);
+        () -> complete(uploader0, handle, dest, new HashMap<>()));
+    abortUpload(handle, dest);
  }

  /**
@@ -571,7 +695,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
    byte[] payload = generatePayload(1);
    InputStream is = new ByteArrayInputStream(payload);
    intercept(IllegalArgumentException.class,
-        () -> mpu.putPart(dest, is, 1, emptyHandle, payload.length));
+        () -> uploader0.putPart(emptyHandle, 1, dest, is, payload.length));
  }

  /**
@@ -581,7 +705,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
  public void testCompleteEmptyUploadID() throws Exception {
    describe("Expect IllegalArgumentException when complete uploadID is empty");
    Path dest = methodPath();
-    UploadHandle realHandle = initializeUpload(dest);
+    UploadHandle realHandle = startUpload(dest);
    UploadHandle emptyHandle =
        BBUploadHandle.from(ByteBuffer.wrap(new byte[0]));
    Map<Integer, PartHandle> partHandles = new HashMap<>();
@@ -590,14 +714,14 @@ public abstract class AbstractContractMultipartUploaderTest extends
    partHandles.put(1, partHandle);

    intercept(IllegalArgumentException.class,
-        () -> mpu.complete(dest, partHandles, emptyHandle));
+        () -> complete(uploader0, emptyHandle, dest, partHandles));

    // and, while things are setup, attempt to complete with
    // a part index of 0
    partHandles.clear();
    partHandles.put(0, partHandle);
    intercept(IllegalArgumentException.class,
-        () -> mpu.complete(dest, partHandles, realHandle));
+        () -> complete(uploader0, realHandle, dest, partHandles));
  }

  /**
@@ -610,7 +734,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
  public void testDirectoryInTheWay() throws Exception {
    FileSystem fs = getFileSystem();
    Path file = methodPath();
-    UploadHandle uploadHandle = initializeUpload(file);
+    UploadHandle uploadHandle = startUpload(file);
    Map<Integer, PartHandle> partHandles = new HashMap<>();
    int size = SMALL_FILE;
    PartHandle partHandle = putPart(file, uploadHandle, 1,
@@ -622,7 +746,7 @@ public abstract class AbstractContractMultipartUploaderTest extends
        () -> completeUpload(file, uploadHandle, partHandles, null,
            size));
    // abort should still work
-    abortUpload(file, uploadHandle);
+    abortUpload(uploadHandle, file);
  }

  @Test
@@ -630,46 +754,44 @@ public abstract class AbstractContractMultipartUploaderTest extends

    // if the FS doesn't support concurrent uploads, this test is
    // required to fail during the second initialization.
-    final boolean concurrent = supportsConcurrentUploadsToSamePath();
+    boolean concurrent = supportsConcurrentUploadsToSamePath();

    describe("testing concurrent uploads, MPU support for this is "
        + concurrent);
-    final FileSystem fs = getFileSystem();
-    final Path file = methodPath();
-    final int size1 = SMALL_FILE;
-    final int partId1 = 1;
-    final byte[] payload1 = generatePayload(partId1, size1);
-    final MessageDigest digest1 = DigestUtils.getMd5Digest();
+    Path file = methodPath();
+    int size1 = SMALL_FILE;
+    int partId1 = 1;
+    byte[] payload1 = generatePayload(partId1, size1);
+    MessageDigest digest1 = DigestUtils.getMd5Digest();
    digest1.update(payload1);
-    final UploadHandle upload1 = initializeUpload(file);
-    final Map<Integer, PartHandle> partHandles1 = new HashMap<>();
+    UploadHandle upload1 = startUpload(file);
+    Map<Integer, PartHandle> partHandles1 = new HashMap<>();

    // initiate part 2
    // by using a different size, it's straightforward to see which
    // version is visible, before reading/digesting the contents
-    final int size2 = size1 * 2;
-    final int partId2 = 2;
-    final byte[] payload2 = generatePayload(partId1, size2);
-    final MessageDigest digest2 = DigestUtils.getMd5Digest();
+    int size2 = size1 * 2;
+    int partId2 = 2;
+    byte[] payload2 = generatePayload(partId1, size2);
+    MessageDigest digest2 = DigestUtils.getMd5Digest();
    digest2.update(payload2);

-    final UploadHandle upload2;
+    UploadHandle upload2;
    try {
-      upload2 = initializeUpload(file);
+      upload2 = startUpload(file);
      Assume.assumeTrue(
          "The Filesystem is unexpectedly supporting concurrent uploads",
          concurrent);
    } catch (IOException e) {
      if (!concurrent) {
        // this is expected, so end the test
-        LOG.debug("Expected exception raised on concurrent uploads {}", e);
+        LOG.debug("Expected exception raised on concurrent uploads", e);
        return;
      } else {
        throw e;
      }
    }
-    final Map<Integer, PartHandle> partHandles2 = new HashMap<>();
-
+    Map<Integer, PartHandle> partHandles2 = new HashMap<>();

    assertNotEquals("Upload handles match", upload1, upload2);

@@ -686,13 +808,21 @@ public abstract class AbstractContractMultipartUploaderTest extends
    // now upload part 2.
    complete(file, upload2, partHandles2);
    // and await the visible length to match
-    eventually(timeToBecomeConsistentMillis(), 500,
-        () -> {
-          FileStatus status = fs.getFileStatus(file);
-          assertEquals("File length in " + status,
-              size2, status.getLen());
-        });
+    eventually(timeToBecomeConsistentMillis(),
+        () -> verifyFileLength(file, size2),
+        new LambdaTestUtils.ProportionalRetryInterval(
+            CONSISTENCY_INTERVAL,
+            timeToBecomeConsistentMillis()));

    verifyContents(file, digest2, size2);
  }
+
+  @Test
+  public void testPathCapabilities() throws Throwable {
+    FileSystem fs = getFileSystem();
+    Assertions.assertThat(fs.hasPathCapability(getContract().getTestPath(),
+        CommonPathCapabilities.FS_MULTIPART_UPLOADER))
+        .describedAs("fs %s, lacks multipart upload capability", fs)
+        .isTrue();
+  }
}