@@ -17,9 +17,11 @@
  */
 package org.apache.hadoop.hdfs;
 
+import java.io.ByteArrayInputStream;
 import java.io.DataOutputStream;
 import java.io.File;
 import java.io.IOException;
+import java.io.InputStream;
 import java.lang.reflect.Field;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
@@ -32,8 +34,10 @@ import java.util.Random;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DataStreamer.LastExceptionInStreamer;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
@@ -48,6 +52,7 @@ import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
 import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.test.GenericTestUtils;
 import org.apache.hadoop.test.PathUtils;
 import org.apache.htrace.core.SpanId;
@@ -55,6 +60,8 @@ import org.junit.AfterClass;
 import org.junit.Assert;
 import org.junit.BeforeClass;
 import org.junit.Test;
+
+import static org.junit.Assert.assertTrue;
 import static org.mockito.Matchers.anyBoolean;
 import static org.mockito.Matchers.anyLong;
 import org.mockito.Mockito;
@@ -346,6 +353,24 @@ public class TestDFSOutputStream {
     verify(spyClient, times(1)).endFileLease(anyLong());
   }
 
+  @Test
+  public void testStreamFlush() throws Exception {
+    FileSystem fs = cluster.getFileSystem();
+    FSDataOutputStream os = fs.create(new Path("/normal-file"));
+    // Verify output stream supports hsync() and hflush().
+    assertTrue("DFSOutputStream should support hflush()!",
+        os.hasCapability(StreamCapability.HFLUSH.getValue()));
+    assertTrue("DFSOutputStream should support hsync()!",
+        os.hasCapability(StreamCapability.HSYNC.getValue()));
+    byte[] bytes = new byte[1024];
+    InputStream is = new ByteArrayInputStream(bytes);
+    IOUtils.copyBytes(is, os, bytes.length);
+    os.hflush();
+    IOUtils.copyBytes(is, os, bytes.length);
+    os.hsync();
+    os.close();
+  }
+
   @AfterClass
   public static void tearDown() {
     if (cluster != null) {
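
Usage sketch: the new test exercises the StreamCapabilities probe, which lets a
caller ask an output stream whether hflush()/hsync() are actually supported
before depending on them for durability. Below is a minimal sketch of client
code guarding a sync behind that probe, assuming a reachable default
FileSystem; the class name SyncIfSupported, the target path, and the payload
are illustrative assumptions, not part of this patch.

import java.nio.charset.StandardCharsets;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.StreamCapabilities.StreamCapability;

public class SyncIfSupported {
  public static void main(String[] args) throws Exception {
    // Illustrative: resolves the default FileSystem from the classpath
    // configuration; against HDFS this is DistributedFileSystem, whose
    // wrapped stream is the DFSOutputStream tested above.
    FileSystem fs = FileSystem.get(new Configuration());
    try (FSDataOutputStream os = fs.create(new Path("/tmp/capability-demo"))) {
      os.write("record\n".getBytes(StandardCharsets.UTF_8));
      if (os.hasCapability(StreamCapability.HSYNC.getValue())) {
        // Persists the written data on the datanodes before returning.
        os.hsync();
      } else {
        // Fall back to a best-effort flush on streams that cannot sync.
        os.flush();
      }
    }
  }
}

Probing first matters because FSDataOutputStream wraps many stream types, and
not all of them durably sync; a stream that lacks the capability may treat
hsync() as a no-op rather than a guarantee.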