@@ -30,6 +30,7 @@ import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
 
+import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
@@ -2734,7 +2735,7 @@ public class DistributedFileSystem extends FileSystem {
    */
   public static final class HdfsDataOutputStreamBuilder
       extends FSDataOutputStreamBuilder<
-      HdfsDataOutputStream, HdfsDataOutputStreamBuilder> {
+      FSDataOutputStream, HdfsDataOutputStreamBuilder> {
     private final DistributedFileSystem dfs;
     private InetSocketAddress[] favoredNodes = null;
     private String ecPolicyName = null;
@@ -2857,17 +2858,24 @@ public class DistributedFileSystem extends FileSystem {
      * @throws IOException on I/O errors.
      */
     @Override
-    public HdfsDataOutputStream build() throws IOException {
-      if (isRecursive()) {
-        return dfs.create(getPath(), getPermission(), getFlags(),
-            getBufferSize(), getReplication(), getBlockSize(),
-            getProgress(), getChecksumOpt(), getFavoredNodes(),
-            getEcPolicyName());
-      } else {
-        return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
-            getBufferSize(), getReplication(), getBlockSize(), getProgress(),
-            getChecksumOpt(), getFavoredNodes(), getEcPolicyName());
+    public FSDataOutputStream build() throws IOException {
+      if (getFlags().contains(CreateFlag.CREATE)) {
+        if (isRecursive()) {
+          return dfs.create(getPath(), getPermission(), getFlags(),
+              getBufferSize(), getReplication(), getBlockSize(),
+              getProgress(), getChecksumOpt(), getFavoredNodes(),
+              getEcPolicyName());
+        } else {
+          return dfs.createNonRecursive(getPath(), getPermission(), getFlags(),
+              getBufferSize(), getReplication(), getBlockSize(), getProgress(),
+              getChecksumOpt(), getFavoredNodes(), getEcPolicyName());
+        }
+      } else if (getFlags().contains(CreateFlag.APPEND)) {
+        return dfs.append(getPath(), getFlags(), getBufferSize(), getProgress(),
+            getFavoredNodes());
       }
+      throw new HadoopIllegalArgumentException(
+          "Must specify either create or append");
     }
   }
 
@@ -2896,4 +2904,15 @@ public class DistributedFileSystem extends FileSystem {
   public RemoteIterator<OpenFileEntry> listOpenFiles() throws IOException {
     return dfs.listOpenFiles();
   }
+
+  /**
+   * Create a {@link HdfsDataOutputStreamBuilder} to append a file on DFS.
+   *
+   * @param path file path.
+   * @return A {@link HdfsDataOutputStreamBuilder} for appending a file.
+   */
+  @Override
+  public HdfsDataOutputStreamBuilder appendFile(Path path) {
+    return new HdfsDataOutputStreamBuilder(this, path).append();
+  }
 }
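
For context, a minimal client-side sketch of the builder API after this change. The path, class name, and configuration here are hypothetical; it assumes the existing createFile(Path) builder entry point on DistributedFileSystem alongside the appendFile(Path) override added above, and that fs.defaultFS points at an HDFS cluster.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class AppendBuilderExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path path = new Path("/tmp/builder-example.txt"); // hypothetical path

    // Cast is only valid when the path resolves to an HDFS filesystem.
    DistributedFileSystem dfs =
        (DistributedFileSystem) path.getFileSystem(conf);

    // Create the file first. After this patch, build() returns the more
    // general FSDataOutputStream rather than HdfsDataOutputStream.
    try (FSDataOutputStream out = dfs.createFile(path).recursive().build()) {
      out.writeBytes("first line\n");
    }

    // Append via the new appendFile() builder. A builder whose flags
    // contain neither CREATE nor APPEND would make build() throw
    // HadoopIllegalArgumentException, per the dispatch logic above.
    try (FSDataOutputStream out = dfs.appendFile(path).build()) {
      out.writeBytes("appended line\n");
    }
  }
}

The widened type parameter (FSDataOutputStream instead of HdfsDataOutputStream) is what lets the same build() method return either the stream produced by dfs.create()/dfs.createNonRecursive() or the FSDataOutputStream produced by dfs.append().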