@@ -18,14 +18,17 @@
 
 package org.apache.hadoop.ozone.container.common.impl;
 
+import org.apache.commons.codec.binary.Hex;
 import org.apache.commons.io.FileUtils;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
 import org.apache.hadoop.ozone.OzoneConfigKeys;
 import org.apache.hadoop.ozone.OzoneConfiguration;
 import org.apache.hadoop.ozone.OzoneConsts;
+import org.apache.hadoop.ozone.container.common.helpers.ChunkInfo;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerData;
 import org.apache.hadoop.ozone.container.common.helpers.ContainerUtils;
+import org.apache.hadoop.ozone.container.common.helpers.Pipeline;
 import org.apache.hadoop.ozone.container.common.utils.LevelDBStore;
 import org.apache.hadoop.ozone.web.utils.OzoneUtils;
 import org.junit.After;
@@ -33,13 +36,19 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.BeforeClass;
+import org.junit.Rule;
 import org.junit.Test;
+import org.junit.rules.ExpectedException;
 
 import java.io.File;
 import java.io.IOException;
 import java.net.URL;
+import java.nio.file.DirectoryStream;
+import java.nio.file.Files;
 import java.nio.file.Path;
 import java.nio.file.Paths;
+import java.security.MessageDigest;
+import java.security.NoSuchAlgorithmException;
 import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
@@ -47,6 +56,10 @@ import java.util.Map;
 
 import static org.apache.hadoop.ozone.container.ContainerTestHelper
     .createSingleNodePipeline;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getChunk;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper.getData;
+import static org.apache.hadoop.ozone.container.ContainerTestHelper
+    .setDataChecksum;
 import static org.junit.Assert.fail;
 
 /**
@@ -56,11 +69,15 @@ public class TestContainerPersistence {
 
   static String path;
   static ContainerManagerImpl containerManager;
+  static ChunkManagerImpl chunkManager;
   static OzoneConfiguration conf;
   static FsDatasetSpi fsDataSet;
   static MiniDFSCluster cluster;
   static List<Path> pathLists = new LinkedList<>();
 
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
   @BeforeClass
   public static void init() throws IOException {
     conf = new OzoneConfiguration();
@@ -84,6 +101,9 @@ public class TestContainerPersistence {
     cluster.waitActive();
     fsDataSet = cluster.getDataNodes().get(0).getFSDataset();
     containerManager = new ContainerManagerImpl();
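+    // The chunk tests below issue their reads and writes through this
+    // ChunkManagerImpl, which is registered with the ContainerManager.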
+    chunkManager = new ChunkManagerImpl(containerManager);
+    containerManager.setChunkManager(chunkManager);
+
   }
 
   @AfterClass
@@ -115,7 +135,8 @@
     ContainerData data = new ContainerData(containerName);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(), data);
+    containerManager.createContainer(createSingleNodePipeline(containerName),
+        data);
     Assert.assertTrue(containerManager.getContainerMap()
         .containsKey(containerName));
     ContainerManagerImpl.ContainerStatus status = containerManager
@@ -132,14 +153,11 @@
     String containerPathString = ContainerUtils.getContainerNameFromFile(new
         File(status.getContainer().getContainerPath()));
 
-    Path meta = Paths.get(containerPathString);
-
-    String metadataFile = meta.toString() + OzoneConsts.CONTAINER_META;
-    Assert.assertTrue(new File(metadataFile).exists());
+    Path meta = Paths.get(status.getContainer().getDBPath()).getParent();
+    Assert.assertTrue(Files.exists(meta));
 
 
     String dbPath = status.getContainer().getDBPath();
-
     LevelDBStore store = null;
     try {
       store = new LevelDBStore(new File(dbPath), false);
@@ -158,9 +176,9 @@
     ContainerData data = new ContainerData(containerName);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(), data);
+    containerManager.createContainer(createSingleNodePipeline(containerName), data);
     try {
-      containerManager.createContainer(createSingleNodePipeline(), data);
+      containerManager.createContainer(createSingleNodePipeline(containerName), data);
       fail("Expected Exception not thrown.");
     } catch (IOException ex) {
       Assert.assertNotNull(ex);
@@ -176,12 +194,12 @@
     ContainerData data = new ContainerData(containerName1);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(), data);
+    containerManager.createContainer(createSingleNodePipeline(containerName1), data);
 
     data = new ContainerData(containerName2);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(), data);
+    containerManager.createContainer(createSingleNodePipeline(containerName2), data);
 
 
     Assert.assertTrue(containerManager.getContainerMap()
@@ -189,7 +207,7 @@
     Assert.assertTrue(containerManager.getContainerMap()
         .containsKey(containerName2));
 
-    containerManager.deleteContainer(createSingleNodePipeline(),
+    containerManager.deleteContainer(createSingleNodePipeline(containerName1),
         containerName1);
     Assert.assertFalse(containerManager.getContainerMap()
         .containsKey(containerName1));
@@ -200,7 +218,7 @@
     data = new ContainerData(containerName1);
     data.addMetadata("VOLUME", "shire");
     data.addMetadata("owner)", "bilbo");
-    containerManager.createContainer(createSingleNodePipeline(), data);
+    containerManager.createContainer(createSingleNodePipeline(containerName1), data);
 
     // Assert we still have both containers.
     Assert.assertTrue(containerManager.getContainerMap()
@@ -228,7 +246,7 @@
       ContainerData data = new ContainerData(containerName);
       data.addMetadata("VOLUME", "shire");
       data.addMetadata("owner)", "bilbo");
-      containerManager.createContainer(createSingleNodePipeline(), data);
+      containerManager.createContainer(createSingleNodePipeline(containerName), data);
       testMap.put(containerName, data);
     }
 
@@ -251,6 +269,204 @@
     // Assert that we listed all the keys that we had put into
     // container.
     Assert.assertTrue(testMap.isEmpty());
+  }
+
+  /**
+   * Writes a single chunk.
+   *
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testWriteChunk() throws IOException, NoSuchAlgorithmException {
+    final int datalen = 1024;
+    String containerName = OzoneUtils.getRequestID();
+    String keyName = OzoneUtils.getRequestID();
+    Pipeline pipeline = createSingleNodePipeline(containerName);
+
+    pipeline.setContainerName(containerName);
+    ContainerData cData = new ContainerData(containerName);
+    cData.addMetadata("VOLUME", "shire");
+    cData.addMetadata("owner)", "bilbo");
+    containerManager.createContainer(pipeline, cData);
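+    // Build a single 1 KB chunk at offset 0 of this key, record its checksum
+    // in the ChunkInfo, and persist it through the ChunkManager.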
+    ChunkInfo info = getChunk(keyName, 0, 0, datalen);
+    byte[] data = getData(datalen);
+    setDataChecksum(info, data);
+    chunkManager.writeChunk(pipeline, keyName, info, data);
+  }
+
+  /**
+   * Writes many chunks of the same key into different chunk files and verifies
+   * that we have that data in many files.
+   *
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testWriteReadManyChunks() throws IOException,
+      NoSuchAlgorithmException {
+    final int datalen = 1024;
+    final int chunkCount = 1024;
+
+    String containerName = OzoneUtils.getRequestID();
+    String keyName = OzoneUtils.getRequestID();
+    Pipeline pipeline = createSingleNodePipeline(containerName);
+    Map<String, ChunkInfo> fileHashMap = new HashMap<>();
+
+    pipeline.setContainerName(containerName);
+    ContainerData cData = new ContainerData(containerName);
+    cData.addMetadata("VOLUME", "shire");
+    cData.addMetadata("owner)", "bilbo");
+    containerManager.createContainer(pipeline, cData);
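+    // Write chunkCount chunks of the same key. The test expects chunk x to
+    // land in a file named "<keyName>.data.<x>"; each ChunkInfo (with its
+    // checksum) is kept in fileHashMap for the verification passes below.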
+    for (int x = 0; x < chunkCount; x++) {
+      ChunkInfo info = getChunk(keyName, x, 0, datalen);
+      byte[] data = getData(datalen);
+      setDataChecksum(info, data);
+      chunkManager.writeChunk(pipeline, keyName, info, data);
+      String fileName = String.format("%s.data.%d", keyName, x);
+      fileHashMap.put(fileName, info);
+    }
+
+    ContainerData cNewData = containerManager.readContainer(containerName);
+    Assert.assertNotNull(cNewData);
+    Path dataDir = ContainerUtils.getDataDirectory(cNewData);
+
+    String globFormat = String.format("%s.data.*", keyName);
+    MessageDigest sha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
+
+    // Read chunk via file system and verify.
+    int count = 0;
+    try (DirectoryStream<Path> stream =
+             Files.newDirectoryStream(dataDir, globFormat)) {
+      for (Path fname : stream) {
+        sha.update(FileUtils.readFileToByteArray(fname.toFile()));
+        String val = Hex.encodeHexString(sha.digest());
+        Assert.assertEquals(fileHashMap.get(fname.getFileName().toString())
+            .getChecksum(),
+            val);
+        count++;
+        sha.reset();
+      }
+      Assert.assertEquals(chunkCount, count);
+
+      // Read chunk via ReadChunk call.
+      sha.reset();
+      for (int x = 0; x < chunkCount; x++) {
+        String fileName = String.format("%s.data.%d", keyName, x);
+        ChunkInfo info = fileHashMap.get(fileName);
+        byte[] data = chunkManager.readChunk(pipeline, keyName, info);
+        sha.update(data);
+        Assert.assertEquals(Hex.encodeHexString(sha.digest()),
+            info.getChecksum());
+        sha.reset();
+      }
+    }
+  }
+
+  /**
+   * Writes a single chunk, tries to overwrite it without the overwrite flag,
+   * and then retries the write with the overwrite flag set.
+   *
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testOverWrite() throws IOException,
+      NoSuchAlgorithmException {
+    final int datalen = 1024;
+    String containerName = OzoneUtils.getRequestID();
+    String keyName = OzoneUtils.getRequestID();
+    Pipeline pipeline = createSingleNodePipeline(containerName);
+
+    pipeline.setContainerName(containerName);
+    ContainerData cData = new ContainerData(containerName);
+    cData.addMetadata("VOLUME", "shire");
+    cData.addMetadata("owner)", "bilbo");
+    containerManager.createContainer(pipeline, cData);
+    ChunkInfo info = getChunk(keyName, 0, 0, datalen);
+    byte[] data = getData(datalen);
+    setDataChecksum(info, data);
+    chunkManager.writeChunk(pipeline, keyName, info, data);
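+    // A second write to the same chunk without the overwrite flag is expected
+    // to be rejected with an IOException.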
+    try {
+      chunkManager.writeChunk(pipeline, keyName, info, data);
+    } catch (IOException ex) {
+      Assert.assertTrue(ex.getMessage().contains(
+          "Rejecting write chunk request. OverWrite flag required."));
+    }
+
+    // With the overwrite flag it should work now.
+    info.addMetadata(OzoneConsts.CHUNK_OVERWRITE, "true");
+    chunkManager.writeChunk(pipeline, keyName, info, data);
+  }
+
+  /**
+   * This test writes data in many small writes and then reads it back in a
+   * single large read.
+   *
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testMultipleWriteSingleRead() throws IOException,
+      NoSuchAlgorithmException {
+    final int datalen = 1024;
+    final int chunkCount = 1024;
+
+    String containerName = OzoneUtils.getRequestID();
+    String keyName = OzoneUtils.getRequestID();
+    Pipeline pipeline = createSingleNodePipeline(containerName);
+
+    pipeline.setContainerName(containerName);
+    ContainerData cData = new ContainerData(containerName);
+    cData.addMetadata("VOLUME", "shire");
+    cData.addMetadata("owner)", "bilbo");
+    containerManager.createContainer(pipeline, cData);
+    MessageDigest oldSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
+    for (int x = 0; x < chunkCount; x++) {
+      // We are writing to the same chunk file, but at different offsets.
+      long offset = x * datalen;
+      ChunkInfo info = getChunk(keyName, 0, offset, datalen);
+      byte[] data = getData(datalen);
+      oldSha.update(data);
+      setDataChecksum(info, data);
+      chunkManager.writeChunk(pipeline, keyName, info, data);
+    }
+
+    // Request to read the whole data in a single go.
+    ChunkInfo largeChunk = getChunk(keyName, 0, 0, datalen * chunkCount);
+    byte[] newdata = chunkManager.readChunk(pipeline, keyName, largeChunk);
+    MessageDigest newSha = MessageDigest.getInstance(OzoneConsts.FILE_HASH);
+    newSha.update(newdata);
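+    // The digest of the single large read must match the digest accumulated
+    // over all of the small writes.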
+    Assert.assertEquals(Hex.encodeHexString(oldSha.digest()),
+        Hex.encodeHexString(newSha.digest()));
+  }
+
+  /**
+   * Writes a chunk, deletes it, and then re-reads it to make sure it is gone.
+   *
+   * @throws IOException
+   * @throws NoSuchAlgorithmException
+   */
+  @Test
+  public void testDeleteChunk() throws IOException,
+      NoSuchAlgorithmException {
+    final int datalen = 1024;
+    String containerName = OzoneUtils.getRequestID();
+    String keyName = OzoneUtils.getRequestID();
+    Pipeline pipeline = createSingleNodePipeline(containerName);
+
+    pipeline.setContainerName(containerName);
+    ContainerData cData = new ContainerData(containerName);
+    cData.addMetadata("VOLUME", "shire");
+    cData.addMetadata("owner)", "bilbo");
+    containerManager.createContainer(pipeline, cData);
+    ChunkInfo info = getChunk(keyName, 0, 0, datalen);
+    byte[] data = getData(datalen);
+    setDataChecksum(info, data);
+    chunkManager.writeChunk(pipeline, keyName, info, data);
+    chunkManager.deleteChunk(pipeline, keyName, info);
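+    // Reading the chunk back after the delete should fail; the
+    // ExpectedException rule verifies both the IOException and its message.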
+    exception.expect(IOException.class);
+    exception.expectMessage("Unable to find the chunk file.");
+    chunkManager.readChunk(pipeline, keyName, info);
   }
-}
+}