@@ -33,6 +33,7 @@ import static org.junit.Assert.fail;
 import java.io.IOException;
 import java.util.concurrent.ThreadLocalRandom;
 
+import org.apache.hadoop.fs.CommonPathCapabilities;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockManagerTestUtil;
 import org.apache.hadoop.hdfs.server.datanode.DataNodeFaultInjector;
 import org.apache.hadoop.ipc.RemoteException;
@@ -143,6 +144,8 @@ public class TestFileTruncate {
         writeContents(contents, fileLength, p);
 
         int newLength = fileLength - toTruncate;
+        assertTrue("DFS supports truncate",
+            fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
         boolean isReady = fs.truncate(p, newLength);
         LOG.info("fileLength=" + fileLength + ", newLength=" + newLength
             + ", toTruncate=" + toTruncate + ", isReady=" + isReady);
@@ -176,6 +179,8 @@ public class TestFileTruncate {
 
     for(int n = data.length; n > 0; ) {
       final int newLength = ThreadLocalRandom.current().nextInt(n);
+      assertTrue("DFS supports truncate",
+          fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
       final boolean isReady = fs.truncate(p, newLength);
       LOG.info("newLength=" + newLength + ", isReady=" + isReady);
       assertEquals("File must be closed for truncating at the block boundary",
@@ -209,6 +214,8 @@ public class TestFileTruncate {
     final int newLength = data.length - 1;
     assert newLength % BLOCK_SIZE != 0 :
         " newLength must not be multiple of BLOCK_SIZE";
+    assertTrue("DFS supports truncate",
+        fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE));
     final boolean isReady = fs.truncate(p, newLength);
     LOG.info("newLength=" + newLength + ", isReady=" + isReady);
     assertEquals("File must be closed for truncating at the block boundary",
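
For context, a minimal standalone sketch of the probe-then-truncate pattern these added assertions exercise, assuming a default-configured FileSystem. The TruncateProbe class name, the /tmp/truncate-demo path, and the 1024-byte target length are illustrative only and not part of the patch:

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonPathCapabilities;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class TruncateProbe {
  public static void main(String[] args) throws IOException {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/tmp/truncate-demo");  // hypothetical demo path

    // hasPathCapability() lets a caller probe whether the store implements
    // truncate for this path, instead of catching
    // UnsupportedOperationException from truncate() itself.
    if (fs.hasPathCapability(p, CommonPathCapabilities.FS_TRUNCATE)) {
      // truncate() returns true if the file is immediately available for
      // further writes, false if recovery of the last block is still in
      // progress; in HDFS, truncating exactly at a block boundary needs no
      // recovery, which is what the assertEquals checks above rely on.
      boolean isReady = fs.truncate(p, 1024L);
      System.out.println("truncate issued, isReady=" + isReady);
    } else {
      System.out.println("truncate not supported for " + p);
    }
  }
}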