|
@@ -0,0 +1,498 @@
|
|
|
+/**
|
|
|
+ * Licensed to the Apache Software Foundation (ASF) under one
|
|
|
+ * or more contributor license agreements. See the NOTICE file
|
|
|
+ * distributed with this work for additional information
|
|
|
+ * regarding copyright ownership. The ASF licenses this file
|
|
|
+ * to you under the Apache License, Version 2.0 (the
|
|
|
+ * "License"); you may not use this file except in compliance
|
|
|
+ * with the License. You may obtain a copy of the License at
|
|
|
+ *
|
|
|
+ * http://www.apache.org/licenses/LICENSE-2.0
|
|
|
+ *
|
|
|
+ * Unless required by applicable law or agreed to in writing, software
|
|
|
+ * distributed under the License is distributed on an "AS IS" BASIS,
|
|
|
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
|
+ * See the License for the specific language governing permissions and
|
|
|
+ * limitations under the License.
|
|
|
+ */
|
|
|
+package org.apache.hadoop.dfs;
|
|
|
+
|
|
|
+import java.io.IOException;
|
|
|
+import java.io.InputStream;
|
|
|
+import java.io.OutputStream;
|
|
|
+import java.util.HashMap;
|
|
|
+import org.apache.hadoop.conf.Configurable;
|
|
|
+import org.apache.hadoop.conf.Configuration;
|
|
|
+import org.apache.hadoop.util.DiskChecker.DiskErrorException;
|
|
|
+
|
|
|
+/**
|
|
|
+ * This class implements a simulated FSDataset.
|
|
|
+ *
|
|
|
+ * Blocks that are created are recorded but their data (plus their CRCs) are
|
|
|
+ * discarded.
|
|
|
+ * Fixed data is returned when blocks are read; a null CRC meta file is
|
|
|
+ * created for such data.
|
|
|
+ *
|
|
|
+ * This FSDataset does not remember any block information across its
|
|
|
+ * restarts; it does however offer an operation to inject blocks
|
|
|
+ * (See the TestInjectionForSimulatedStorage()
|
|
|
+ * for a usage example of injection.)
|
|
|
+ *
|
|
|
+ * Note the synchronization is coarse grained - it is at each method.
|
|
|
+ */
|
|
|
+
|
|
|
+public class SimulatedFSDataset implements FSConstants, FSDatasetInterface, Configurable{
|
|
|
+
|
|
|
+ public static final String CONFIG_PROPERTY_SIMULATED =
|
|
|
+ "dfs.datanode.simulateddatastorage";
|
|
|
+ public static final String CONFIG_PROPERTY_CAPACITY =
|
|
|
+ "dfs.datanode.simulateddatastorage.capacity";
|
|
|
+
|
|
|
+ public static final long DEFAULT_CAPACITY = 2L<<40; // 1 terabyte
|
|
|
+ public static final byte DEFAULT_DATABYTE = 9; // 1 terabyte
|
|
|
+ byte simulatedDataByte = DEFAULT_DATABYTE;
|
|
|
+ Configuration conf = null;
|
|
|
+
|
|
|
+ static byte[] nullCrcFileData;
|
|
|
+ {
|
|
|
+ DataChecksum checksum = DataChecksum.newDataChecksum( DataChecksum.
|
|
|
+ CHECKSUM_NULL, 16*1024 );
|
|
|
+ byte[] nullCrcHeader = checksum.getHeader();
|
|
|
+ nullCrcFileData = new byte[2 + nullCrcHeader.length];
|
|
|
+ nullCrcFileData[0] = (byte) ((FSDataset.METADATA_VERSION >>> 8) & 0xff);
|
|
|
+ nullCrcFileData[1] = (byte) (FSDataset.METADATA_VERSION & 0xff);
|
|
|
+ for (int i = 0; i < nullCrcHeader.length; i++) {
|
|
|
+ nullCrcFileData[i+2] = nullCrcHeader[i];
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private class BInfo { // information about a single block
|
|
|
+ Block theBlock;
|
|
|
+ private boolean finalized = false; // if not finalized => ongoing creation
|
|
|
+ SimulatedOutputStream oStream = null;
|
|
|
+ BInfo(Block b, boolean forWriting) throws IOException {
|
|
|
+ theBlock = new Block(b);
|
|
|
+ if (theBlock.len < 0) {
|
|
|
+ theBlock.len = 0;
|
|
|
+ }
|
|
|
+ if (!storage.alloc(theBlock.len)) { // expected length - actual length may
|
|
|
+ // be more - we find out at finalize
|
|
|
+ DataNode.LOG.warn("Lack of free storage on a block alloc");
|
|
|
+ throw new IOException("Creating block, no free space available");
|
|
|
+ }
|
|
|
+
|
|
|
+ if (forWriting) {
|
|
|
+ finalized = false;
|
|
|
+ oStream = new SimulatedOutputStream();
|
|
|
+ } else {
|
|
|
+ finalized = true;
|
|
|
+ oStream = null;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized long getlength() {
|
|
|
+ if (!finalized) {
|
|
|
+ return oStream.getLength();
|
|
|
+ } else {
|
|
|
+ return theBlock.len;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized SimulatedInputStream getIStream() throws IOException {
|
|
|
+ if (!finalized) {
|
|
|
+ // throw new IOException("Trying to read an unfinalized block");
|
|
|
+ return new SimulatedInputStream(oStream.getLength(), DEFAULT_DATABYTE);
|
|
|
+ } else {
|
|
|
+ return new SimulatedInputStream(theBlock.len, DEFAULT_DATABYTE);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized void finalizeBlock(long finalSize) throws IOException {
|
|
|
+ if (finalized) {
|
|
|
+ throw new IOException(
|
|
|
+ "Finalizing a block that has already been finalized" +
|
|
|
+ theBlock.blkid);
|
|
|
+ }
|
|
|
+ if (oStream == null) {
|
|
|
+ DataNode.LOG.error("Null oStream on unfinalized block - bug");
|
|
|
+ throw new IOException("Unexpected error on finalize");
|
|
|
+ }
|
|
|
+
|
|
|
+ if (oStream.getLength() != finalSize) {
|
|
|
+ DataNode.LOG.warn("Size passed to finalize (" + finalSize +
|
|
|
+ ")does not match what was written:" + oStream.getLength());
|
|
|
+ throw new IOException(
|
|
|
+ "Size passed to finalize does not match the amount of data written");
|
|
|
+ }
|
|
|
+ // We had allocated the expected length when block was created;
|
|
|
+ // adjust if necessary
|
|
|
+ long extraLen = finalSize - theBlock.len;
|
|
|
+ if (extraLen > 0) {
|
|
|
+ if (!storage.alloc(extraLen)) {
|
|
|
+ DataNode.LOG.warn("Lack of free storage on a block alloc");
|
|
|
+ throw new IOException("Creating block, no free space available");
|
|
|
+ }
|
|
|
+ } else {
|
|
|
+ storage.free(extraLen);
|
|
|
+ }
|
|
|
+ theBlock.len = finalSize;
|
|
|
+
|
|
|
+ finalized = true;
|
|
|
+ oStream = null;
|
|
|
+ return;
|
|
|
+ }
|
|
|
+
|
|
|
+ SimulatedInputStream getMetaIStream() {
|
|
|
+ return new SimulatedInputStream(nullCrcFileData);
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ static private class SimulatedStorage {
|
|
|
+ private long capacity; // in bytes
|
|
|
+ private long used; // in bytes
|
|
|
+
|
|
|
+ synchronized long getFree() {
|
|
|
+ return capacity - used;
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized long getCapacity() {
|
|
|
+ return capacity;
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized long getUsed() {
|
|
|
+ return used;
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized boolean alloc(long amount) {
|
|
|
+ if (getFree() >= amount) {
|
|
|
+ used += amount;
|
|
|
+ return true;
|
|
|
+ } else {
|
|
|
+ return false;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ synchronized void free(long amount) {
|
|
|
+ used -= amount;
|
|
|
+ }
|
|
|
+
|
|
|
+ SimulatedStorage(long cap) {
|
|
|
+ capacity = cap;
|
|
|
+ used = 0;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ private HashMap<Block, BInfo> blockMap = null;
|
|
|
+ private SimulatedStorage storage = null;
|
|
|
+
|
|
|
+ public SimulatedFSDataset(Configuration conf) throws IOException {
|
|
|
+ setConf(conf);
|
|
|
+ }
|
|
|
+
|
|
|
+ private SimulatedFSDataset() { // real construction when setConf called.. Uggg
|
|
|
+ }
|
|
|
+ public Configuration getConf() {
|
|
|
+ return conf;
|
|
|
+ }
|
|
|
+
|
|
|
+ public void setConf(Configuration iconf) {
|
|
|
+ conf = iconf;
|
|
|
+ storage = new SimulatedStorage(
|
|
|
+ conf.getLong(CONFIG_PROPERTY_CAPACITY, DEFAULT_CAPACITY));
|
|
|
+ //DataNode.LOG.info("Starting Simulated storage; Capacity = " + getCapacity() +
|
|
|
+ // "Used = " + getDfsUsed() + "Free =" + getRemaining());
|
|
|
+
|
|
|
+ blockMap = new HashMap<Block,BInfo>();
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized void injectBlocks(Block[] injectBlocks)
|
|
|
+ throws IOException {
|
|
|
+ if (injectBlocks != null) {
|
|
|
+ for (Block b: injectBlocks) { // if any blocks in list is bad, reject list
|
|
|
+ if (b == null) {
|
|
|
+ throw new NullPointerException("Null blocks in block list");
|
|
|
+ }
|
|
|
+ if (isValidBlock(b)) {
|
|
|
+ throw new IOException("Block already exists in block list");
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ blockMap = new HashMap<Block,BInfo>(injectBlocks.length);
|
|
|
+ for (Block b: injectBlocks) {
|
|
|
+ BInfo binfo = new BInfo(b, false);
|
|
|
+ blockMap.put(b, binfo);
|
|
|
+ }
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized void finalizeBlock(Block b) throws IOException {
|
|
|
+ BInfo binfo = blockMap.get(b);
|
|
|
+ if (binfo == null) {
|
|
|
+ throw new IOException("Finalizing a non existing block " + b);
|
|
|
+ }
|
|
|
+ binfo.finalizeBlock(b.getNumBytes());
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized Block[] getBlockReport() {
|
|
|
+ Block[] blockTable = new Block[blockMap.size()];
|
|
|
+ int i = 0;
|
|
|
+ for (Block b: blockMap.keySet()) {
|
|
|
+ blockTable[i++] = blockMap.get(b).theBlock;
|
|
|
+ }
|
|
|
+ return blockTable;
|
|
|
+ }
|
|
|
+
|
|
|
+ public long getCapacity() throws IOException {
|
|
|
+ return storage.getCapacity();
|
|
|
+ }
|
|
|
+
|
|
|
+ public long getDfsUsed() throws IOException {
|
|
|
+ return storage.getUsed();
|
|
|
+ }
|
|
|
+
|
|
|
+ public long getRemaining() throws IOException {
|
|
|
+ return storage.getFree();
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized long getLength(Block b) throws IOException {
|
|
|
+ BInfo binfo = blockMap.get(b);
|
|
|
+ if (binfo == null) {
|
|
|
+ throw new IOException("Finalizing a non existing block " + b);
|
|
|
+ }
|
|
|
+ return binfo.getlength();
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized void invalidate(Block[] invalidBlks) throws IOException {
|
|
|
+ boolean error = false;
|
|
|
+ if (invalidBlks == null) {
|
|
|
+ return;
|
|
|
+ }
|
|
|
+ for (Block b: invalidBlks) {
|
|
|
+ if (b == null) {
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ BInfo binfo = blockMap.get(b);
|
|
|
+ if (binfo == null) {
|
|
|
+ error = true;
|
|
|
+ DataNode.LOG.warn("Invalidate: Missing block");
|
|
|
+ continue;
|
|
|
+ }
|
|
|
+ storage.free(binfo.getlength());
|
|
|
+ blockMap.remove(b);
|
|
|
+ }
|
|
|
+ if (error) {
|
|
|
+ throw new IOException("Invalidate: Missing blocks.");
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized boolean isValidBlock(Block b) {
|
|
|
+ return (blockMap.containsKey(b));
|
|
|
+ }
|
|
|
+
|
|
|
+ public String toString() {
|
|
|
+ return "Simulated FSDataset";
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized BlockWriteStreams writeToBlock(Block b)
|
|
|
+ throws IOException {
|
|
|
+ if (isValidBlock(b)) {
|
|
|
+ throw new IOException("Block " + b +
|
|
|
+ " is valid, and cannot be written to.");
|
|
|
+ }
|
|
|
+ BInfo binfo = new BInfo(b, true);
|
|
|
+ blockMap.put(b, binfo);
|
|
|
+ SimulatedOutputStream crcStream = new SimulatedOutputStream();
|
|
|
+ return new BlockWriteStreams(binfo.oStream, crcStream);
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized InputStream getBlockInputStream(Block b)
|
|
|
+ throws IOException {
|
|
|
+ BInfo binfo = blockMap.get(b);
|
|
|
+ if (binfo == null) {
|
|
|
+ throw new IOException("No such Block " + b );
|
|
|
+ }
|
|
|
+
|
|
|
+ //DataNode.LOG.info("Opening block(" + b.blkid + ") of length " + b.len);
|
|
|
+ return binfo.getIStream();
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized InputStream getBlockInputStream(Block b, long seekOffset)
|
|
|
+ throws IOException {
|
|
|
+ InputStream result = getBlockInputStream(b);
|
|
|
+ result.skip(seekOffset);
|
|
|
+ return result;
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * Returns metaData of block b as an input stream
|
|
|
+ * @param b - the block for which the metadata is desired
|
|
|
+ * @return metaData of block b as an input stream
|
|
|
+ * @throws IOException - block does not exist or problems accessing
|
|
|
+ * the meta file
|
|
|
+ */
|
|
|
+ private synchronized InputStream getMetaDataInStream(Block b)
|
|
|
+ throws IOException {
|
|
|
+ BInfo binfo = blockMap.get(b);
|
|
|
+ if (binfo == null) {
|
|
|
+ throw new IOException("No such Block " + b );
|
|
|
+ }
|
|
|
+ if (!binfo.finalized) {
|
|
|
+ throw new IOException("Block " + b +
|
|
|
+ " is being written, its meta cannot be read");
|
|
|
+ }
|
|
|
+ return binfo.getMetaIStream();
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized long getMetaDataLength(Block b) throws IOException {
|
|
|
+ BInfo binfo = blockMap.get(b);
|
|
|
+ if (binfo == null) {
|
|
|
+ throw new IOException("No such Block " + b );
|
|
|
+ }
|
|
|
+ if (!binfo.finalized) {
|
|
|
+ throw new IOException("Block " + b +
|
|
|
+ " is being written, its metalength cannot be read");
|
|
|
+ }
|
|
|
+ return binfo.getMetaIStream().getLength();
|
|
|
+ }
|
|
|
+
|
|
|
+ public MetaDataInputStream getMetaDataInputStream(Block b)
|
|
|
+ throws IOException {
|
|
|
+
|
|
|
+ return new MetaDataInputStream(getMetaDataInStream(b),
|
|
|
+ getMetaDataLength(b));
|
|
|
+ }
|
|
|
+
|
|
|
+ public synchronized boolean metaFileExists(Block b) throws IOException {
|
|
|
+ if (!isValidBlock(b)) {
|
|
|
+ throw new IOException("Block " + b +
|
|
|
+ " is valid, and cannot be written to.");
|
|
|
+ }
|
|
|
+ return true; // crc exists for all valid blocks
|
|
|
+ }
|
|
|
+
|
|
|
+ public void checkDataDir() throws DiskErrorException {
|
|
|
+ // nothing to check for simulated data set
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+ /**
|
|
|
+ * Simulated input and output streams
|
|
|
+ *
|
|
|
+ */
|
|
|
+ static private class SimulatedInputStream extends java.io.InputStream {
|
|
|
+
|
|
|
+
|
|
|
+ byte theRepeatedData = 7;
|
|
|
+ long length; // bytes
|
|
|
+ int currentPos = 0;
|
|
|
+ byte[] data = null;
|
|
|
+
|
|
|
+ /**
|
|
|
+ * An input stream of size l with repeated bytes
|
|
|
+ * @param l
|
|
|
+ * @param iRepeatedData
|
|
|
+ */
|
|
|
+ SimulatedInputStream(long l, byte iRepeatedData) {
|
|
|
+ length = l;
|
|
|
+ theRepeatedData = iRepeatedData;
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * An input stream of of the supplied data
|
|
|
+ *
|
|
|
+ * @param iData
|
|
|
+ */
|
|
|
+ SimulatedInputStream(byte[] iData) {
|
|
|
+ data = iData;
|
|
|
+ length = data.length;
|
|
|
+
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ *
|
|
|
+ * @return the lenght of the input stream
|
|
|
+ */
|
|
|
+ long getLength() {
|
|
|
+ return length;
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public int read() throws IOException {
|
|
|
+ if (currentPos >= length)
|
|
|
+ return -1;
|
|
|
+ if (data !=null) {
|
|
|
+ return data[currentPos++];
|
|
|
+ } else {
|
|
|
+ currentPos++;
|
|
|
+ return theRepeatedData;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public int read(byte[] b) throws IOException {
|
|
|
+
|
|
|
+ if (b == null) {
|
|
|
+ throw new NullPointerException();
|
|
|
+ }
|
|
|
+ if (b.length == 0) {
|
|
|
+ return 0;
|
|
|
+ }
|
|
|
+ if (currentPos >= length) { // EOF
|
|
|
+ return -1;
|
|
|
+ }
|
|
|
+ int bytesRead = (int) Math.min(b.length, length-currentPos);
|
|
|
+ if (data != null) {
|
|
|
+ System.arraycopy(data, currentPos, b, 0, bytesRead);
|
|
|
+ } else { // all data is zero
|
|
|
+ for (int i : b) {
|
|
|
+ b[i] = theRepeatedData;
|
|
|
+ }
|
|
|
+ }
|
|
|
+ currentPos += bytesRead;
|
|
|
+ return bytesRead;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ * This class implements an output stream that merely throws its data away, but records its
|
|
|
+ * length.
|
|
|
+ *
|
|
|
+ */
|
|
|
+ static private class SimulatedOutputStream extends OutputStream {
|
|
|
+ long length = 0;
|
|
|
+
|
|
|
+ /**
|
|
|
+ * constructor for Simulated Output Steram
|
|
|
+ */
|
|
|
+ SimulatedOutputStream() {
|
|
|
+ }
|
|
|
+
|
|
|
+ /**
|
|
|
+ *
|
|
|
+ * @return the lenght of the data created so far.
|
|
|
+ */
|
|
|
+ long getLength() {
|
|
|
+ return length;
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void write(int arg0) throws IOException {
|
|
|
+ length++;
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void write(byte[] b) throws IOException {
|
|
|
+ length += b.length;
|
|
|
+ }
|
|
|
+
|
|
|
+ @Override
|
|
|
+ public void write(byte[] b,
|
|
|
+ int off,
|
|
|
+ int len) throws IOException {
|
|
|
+ length += len;
|
|
|
+ }
|
|
|
+ }
|
|
|
+
|
|
|
+
|
|
|
+}
|