@@ -77,6 +77,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor.CachedBlocksList.Type;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
+import org.apache.hadoop.hdfs.server.datanode.DataNodeTestUtils;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.io.nativeio.NativeIO;
 import org.apache.hadoop.io.nativeio.NativeIO.POSIX.CacheManipulator;
@@ -1453,4 +1454,28 @@ public class TestCacheDirectives {
     Thread.sleep(1000);
     checkPendingCachedEmpty(cluster);
   }
+
+  @Test(timeout=60000)
+  public void testNoBackingReplica() throws Exception {
+    // Cache all three replicas for a file.
+    final Path filename = new Path("/noback");
+    final short replication = (short) 3;
+    DFSTestUtil.createFile(dfs, filename, 1, replication, 0x0BAC);
+    dfs.addCachePool(new CachePoolInfo("pool"));
+    dfs.addCacheDirective(
+        new CacheDirectiveInfo.Builder().setPool("pool").setPath(filename)
+            .setReplication(replication).build());
+    waitForCachedBlocks(namenode, 1, replication, "testNoBackingReplica:1");
+    // Pause cache reports while we change the replication factor.
+    // This will orphan some cached replicas.
+    DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, true);
+    try {
+      dfs.setReplication(filename, (short) 1);
+      DFSTestUtil.waitForReplication(dfs, filename, (short) 1, 30000);
+      // The cache locations should drop down to 1 even without cache reports.
+      waitForCachedBlocks(namenode, 1, (short) 1, "testNoBackingReplica:2");
+    } finally {
+      DataNodeTestUtils.setCacheReportsDisabledForTests(cluster, false);
+    }
+  }
 }