|
@@ -34,6 +34,7 @@ import org.apache.hadoop.hdfs.DistributedFileSystem;
|
|
|
import org.apache.hadoop.hdfs.MiniDFSCluster;
|
|
|
import org.apache.hadoop.hdfs.MiniDFSNNTopology;
|
|
|
import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
|
|
|
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
|
|
|
import org.apache.hadoop.hdfs.protocolPB.DatanodeProtocolClientSideTranslatorPB;
|
|
|
import org.apache.hadoop.hdfs.server.common.Storage;
|
|
|
import org.apache.hadoop.hdfs.server.datanode.fsdataset.FsDatasetSpi;
|
|
@@ -64,6 +65,8 @@ import java.util.concurrent.TimeoutException;
|
|
|
import org.apache.commons.logging.Log;
|
|
|
import org.apache.commons.logging.LogFactory;
|
|
|
import org.mockito.Mockito;
|
|
|
+import org.mockito.invocation.InvocationOnMock;
|
|
|
+import org.mockito.stubbing.Answer;
|
|
|
|
|
|
import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_DATA_DIR_KEY;
|
|
|
import static org.hamcrest.CoreMatchers.anyOf;
|
|
@@ -77,6 +80,7 @@ import static org.junit.Assert.assertTrue;
|
|
|
import static org.junit.Assert.fail;
|
|
|
import static org.mockito.Matchers.any;
|
|
|
import static org.mockito.Matchers.anyString;
|
|
|
+import static org.mockito.Mockito.doAnswer;
|
|
|
import static org.mockito.Mockito.timeout;
|
|
|
|
|
|
public class TestDataNodeHotSwapVolumes {
|
|
@@ -577,6 +581,7 @@ public class TestDataNodeHotSwapVolumes {
|
|
|
final DataNode dn = cluster.getDataNodes().get(dataNodeIdx);
|
|
|
final FileSystem fs = cluster.getFileSystem();
|
|
|
final Path testFile = new Path("/test");
|
|
|
+ final long lastTimeDiskErrorCheck = dn.getLastDiskErrorCheck();
|
|
|
|
|
|
FSDataOutputStream out = fs.create(testFile, REPLICATION);
|
|
|
|
|
@@ -586,6 +591,23 @@ public class TestDataNodeHotSwapVolumes {
|
|
|
out.write(writeBuf);
|
|
|
out.hflush();
|
|
|
|
|
|
+ // Make FsDatasetSpi#finalizeBlock a time-consuming operation. So if the
|
|
|
+  // BlockReceiver releases the volume reference before finalizeBlock(), the blocks
|
|
|
+ // on the volume will be removed, and finalizeBlock() throws IOE.
|
|
|
+ final FsDatasetSpi<? extends FsVolumeSpi> data = dn.data;
|
|
|
+ dn.data = Mockito.spy(data);
|
|
|
+ doAnswer(new Answer<Object>() {
|
|
|
+ public Object answer(InvocationOnMock invocation)
|
|
|
+ throws IOException, InterruptedException {
|
|
|
+ Thread.sleep(1000);
|
|
|
+      // Pass the argument through to the real FsDatasetImpl#finalizeBlock to verify that
|
|
|
+ // the block is not removed, since the volume reference should not
|
|
|
+ // be released at this point.
|
|
|
+ data.finalizeBlock((ExtendedBlock) invocation.getArguments()[0]);
|
|
|
+ return null;
|
|
|
+ }
|
|
|
+ }).when(dn.data).finalizeBlock(any(ExtendedBlock.class));
|
|
|
+
|
|
|
final CyclicBarrier barrier = new CyclicBarrier(2);
|
|
|
|
|
|
List<String> oldDirs = getDataDirs(dn);
|
|
@@ -612,13 +634,19 @@ public class TestDataNodeHotSwapVolumes {
|
|
|
out.hflush();
|
|
|
out.close();
|
|
|
|
|
|
+ reconfigThread.join();
|
|
|
+
|
|
|
// Verify the file has sufficient replications.
|
|
|
DFSTestUtil.waitReplication(fs, testFile, REPLICATION);
|
|
|
// Read the content back
|
|
|
byte[] content = DFSTestUtil.readFileBuffer(fs, testFile);
|
|
|
assertEquals(BLOCK_SIZE, content.length);
|
|
|
|
|
|
- reconfigThread.join();
|
|
|
+    // If an IOException is thrown from BlockReceiver#run, it triggers
|
|
|
+ // DataNode#checkDiskError(). So we can test whether checkDiskError() is called,
|
|
|
+ // to see whether there is IOException in BlockReceiver#run().
|
|
|
+ assertEquals(lastTimeDiskErrorCheck, dn.getLastDiskErrorCheck());
|
|
|
+
|
|
|
if (!exceptions.isEmpty()) {
|
|
|
throw new IOException(exceptions.get(0).getCause());
|
|
|
}
|