
HDFS-3702. Add an option for NOT writing the blocks locally if there is a datanode on the same box as the client. (Contributed by Lei (Eddy) Xu)

Lei Xu 9 years ago
parent
commit
0a152103f1
41 changed files with 367 additions and 93 deletions
  1. +8 -1   hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java
  2. +59 -0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java
  3. +19 -6  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java
  4. +3 -3   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java
  5. +10 -6  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java
  6. +4 -2   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java
  7. +4 -1   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java
  8. +6 -1   hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java
  9. +27 -0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java
  10. +5 -0  hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto
  11. +5 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java
  12. +6 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  13. +8 -3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java
  14. +40 -11  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java
  15. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java
  16. +2 -1  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java
  17. +5 -4  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java
  18. +4 -2  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  19. +4 -3  hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java
  20. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java
  21. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java
  22. +9 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  23. +58 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java
  24. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java
  25. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java
  26. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java
  27. +43 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  28. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java
  29. +5 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java
  30. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithUpgradeDomain.java
  31. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
  32. +4 -4  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java
  33. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java
  34. +2 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java
  35. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java
  36. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java
  37. +5 -2  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java
  38. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java
  39. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java
  40. +1 -1  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java
  41. +3 -3  hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java

+ 8 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CreateFlag.java

@@ -103,7 +103,14 @@ public enum CreateFlag {
   * Append data to a new block instead of the end of the last partial block.
   * This is only useful for APPEND.
   */
-  NEW_BLOCK((short) 0x20);
+  NEW_BLOCK((short) 0x20),
+
+  /**
+   * Advise that a block replica NOT be written to the local DataNode where
+   * 'local' means the same host as the client is being run on.
+   */
+  @InterfaceAudience.LimitedPrivate({"HBase"})
+  NO_LOCAL_WRITE((short) 0x40);

  private final short mode;


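The flag added above is purely an advisory hint consumed on the client side; it never hard-fails a write. A minimal sketch of how a caller such as HBase might request it, assuming the FileSystem#create overload that accepts a CreateFlag set and using illustrative values for the path, buffer size, replication factor and block size (none of this code is part of the commit):

    import java.util.EnumSet;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CreateFlag;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.permission.FsPermission;

    public class NoLocalWriteExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // CREATE/OVERWRITE are the usual create-time flags; NO_LOCAL_WRITE is the new hint.
        EnumSet<CreateFlag> flags = EnumSet.of(
            CreateFlag.CREATE, CreateFlag.OVERWRITE, CreateFlag.NO_LOCAL_WRITE);

        // Illustrative path, buffer size, replication factor and block size.
        try (FSDataOutputStream out = fs.create(new Path("/tmp/no-local-write-demo"),
            FsPermission.getFileDefault(), flags, 4096, (short) 3,
            128L * 1024 * 1024, null)) {
          out.writeBytes("replicas should avoid the client's local DataNode when possible");
        }
      }
    }

Because the flag only advises placement, the write still succeeds when the cluster cannot honor it; the NameNode simply falls back to the default policy, as shown in BlockPlacementPolicyDefault further down.
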
+ 59 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/AddBlockFlag.java

@@ -0,0 +1,59 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.CreateFlag;
+
+/**
+ * AddBlockFlag provides hints for new block allocation and placement.
+ * Users can use this flag to control <em>per DFSOutputStream</em>
+ * {@see ClientProtocol#addBlock()} behavior.
+ */
+@InterfaceAudience.Private
+@InterfaceStability.Evolving
+public enum AddBlockFlag {
+
+  /**
+   * Advise that a block replica NOT be written to the local DataNode where
+   * 'local' means the same host as the client is being run on.
+   *
+   * @see CreateFlag#NO_LOCAL_WRITE
+   */
+  NO_LOCAL_WRITE((short) 0x01);
+
+  private final short mode;
+
+  AddBlockFlag(short mode) {
+    this.mode = mode;
+  }
+
+  public static AddBlockFlag valueOf(short mode) {
+    for (AddBlockFlag flag : AddBlockFlag.values()) {
+      if (flag.getMode() == mode) {
+        return flag;
+      }
+    }
+    return null;
+  }
+
+  public short getMode() {
+    return mode;
+  }
+}

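AddBlockFlag mirrors the public CreateFlag bit but stays private to HDFS and travels with each DFSOutputStream. Its valueOf(short) helper maps a numeric mode back to the enum and returns null for an unrecognized bit, which is how unknown hints get ignored. A small sketch of that round trip (the 0x80 value is just an arbitrary unknown bit chosen for illustration):

    import org.apache.hadoop.hdfs.AddBlockFlag;

    public class AddBlockFlagRoundTrip {
      public static void main(String[] args) {
        short mode = AddBlockFlag.NO_LOCAL_WRITE.getMode();        // 0x01
        AddBlockFlag decoded = AddBlockFlag.valueOf(mode);         // NO_LOCAL_WRITE
        AddBlockFlag unknown = AddBlockFlag.valueOf((short) 0x80); // null: unrecognized bit
        System.out.println(mode + " -> " + decoded + ", 0x80 -> " + unknown);
      }
    }
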
+ 19 - 6
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -117,6 +117,7 @@ public class DFSOutputStream extends FSOutputSummer
  private long initialFileSize = 0; // at time of file open
  private final short blockReplication; // replication factor of file
  protected boolean shouldSyncBlock = false; // force blocks to disk upon close
+  private final EnumSet<AddBlockFlag> addBlockFlags;
  protected final AtomicReference<CachingStrategy> cachingStrategy;
  private FileEncryptionInfo fileEncryptionInfo;

@@ -179,6 +180,7 @@ public class DFSOutputStream extends FSOutputSummer
  }

  private DFSOutputStream(DFSClient dfsClient, String src,
+      EnumSet<CreateFlag> flag,
      Progressable progress, HdfsFileStatus stat, DataChecksum checksum) {
    super(getChecksum4Compute(checksum, stat));
    this.dfsClient = dfsClient;
@@ -189,6 +191,10 @@ public class DFSOutputStream extends FSOutputSummer
    this.fileEncryptionInfo = stat.getFileEncryptionInfo();
    this.cachingStrategy = new AtomicReference<>(
        dfsClient.getDefaultWriteCachingStrategy());
+    this.addBlockFlags = EnumSet.noneOf(AddBlockFlag.class);
+    if (flag.contains(CreateFlag.NO_LOCAL_WRITE)) {
+      this.addBlockFlags.add(AddBlockFlag.NO_LOCAL_WRITE);
+    }
    if (progress != null) {
      DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
          +"{}", src);
@@ -212,7 +218,7 @@ public class DFSOutputStream extends FSOutputSummer
  protected DFSOutputStream(DFSClient dfsClient, String src,
      HdfsFileStatus stat, EnumSet<CreateFlag> flag, Progressable progress,
      DataChecksum checksum, String[] favoredNodes, boolean createStreamer) {
-    this(dfsClient, src, progress, stat, checksum);
+    this(dfsClient, src, flag, progress, stat, checksum);
    this.shouldSyncBlock = flag.contains(CreateFlag.SYNC_BLOCK);

    computePacketChunkSize(dfsClient.getConf().getWritePacketSize(),
@@ -220,7 +226,8 @@ public class DFSOutputStream extends FSOutputSummer

    if (createStreamer) {
      streamer = new DataStreamer(stat, null, dfsClient, src, progress,
-          checksum, cachingStrategy, byteArrayManager, favoredNodes);
+          checksum, cachingStrategy, byteArrayManager, favoredNodes,
+          addBlockFlags);
    }
  }

@@ -289,7 +296,7 @@ public class DFSOutputStream extends FSOutputSummer
      EnumSet<CreateFlag> flags, Progressable progress, LocatedBlock lastBlock,
      HdfsFileStatus stat, DataChecksum checksum, String[] favoredNodes)
      throws IOException {
-    this(dfsClient, src, progress, stat, checksum);
+    this(dfsClient, src, flags, progress, stat, checksum);
    initialFileSize = stat.getLen(); // length of file when opened
    this.shouldSyncBlock = flags.contains(CreateFlag.SYNC_BLOCK);

@@ -310,7 +317,8 @@ public class DFSOutputStream extends FSOutputSummer
          bytesPerChecksum);
      streamer = new DataStreamer(stat,
          lastBlock != null ? lastBlock.getBlock() : null, dfsClient, src,
-          progress, checksum, cachingStrategy, byteArrayManager, favoredNodes);
+          progress, checksum, cachingStrategy, byteArrayManager, favoredNodes,
+          addBlockFlags);
    }
  }

@@ -844,6 +852,10 @@ public class DFSOutputStream extends FSOutputSummer
    return initialFileSize;
  }

+  protected EnumSet<AddBlockFlag> getAddBlockFlags() {
+    return addBlockFlags;
+  }
+
  /**
   * @return the FileEncryptionInfo for this stream, or null if not encrypted.
   */
@@ -916,7 +928,8 @@ public class DFSOutputStream extends FSOutputSummer

  static LocatedBlock addBlock(DatanodeInfo[] excludedNodes,
      DFSClient dfsClient, String src, ExtendedBlock prevBlock, long fileId,
-      String[] favoredNodes) throws IOException {
+      String[] favoredNodes, EnumSet<AddBlockFlag> allocFlags)
+      throws IOException {
    final DfsClientConf conf = dfsClient.getConf();
    int retries = conf.getNumBlockWriteLocateFollowingRetry();
    long sleeptime = conf.getBlockWriteLocateFollowingInitialDelayMs();
@@ -924,7 +937,7 @@ public class DFSOutputStream extends FSOutputSummer
    while (true) {
      try {
        return dfsClient.namenode.addBlock(src, dfsClient.clientName, prevBlock,
-            excludedNodes, fileId, favoredNodes);
+            excludedNodes, fileId, favoredNodes, allocFlags);
      } catch (RemoteException e) {
        IOException ue = e.unwrapRemoteException(FileNotFoundException.class,
            AccessControlException.class,

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DFSStripedOutputStream.java

@@ -301,7 +301,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
    for (short i = 0; i < numAllBlocks; i++) {
      StripedDataStreamer streamer = new StripedDataStreamer(stat,
          dfsClient, src, progress, checksum, cachingStrategy, byteArrayManager,
-          favoredNodes, i, coordinator);
+          favoredNodes, i, coordinator, getAddBlockFlags());
      streamers.add(streamer);
    }
    currentPackets = new DFSPacket[streamers.size()];
@@ -406,7 +406,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
        StripedDataStreamer streamer = new StripedDataStreamer(oldStreamer.stat,
            dfsClient, src, oldStreamer.progress,
            oldStreamer.checksum4WriteBlock, cachingStrategy, byteArrayManager,
-            favoredNodes, i, coordinator);
+            favoredNodes, i, coordinator, getAddBlockFlags());
        streamers.set(i, streamer);
        currentPackets[i] = null;
        if (i == currentIndex) {
@@ -458,7 +458,7 @@ public class DFSStripedOutputStream extends DFSOutputStream {
    LOG.debug("Allocating new block group. The previous block group: "
        + currentBlockGroup);
    final LocatedBlock lb = addBlock(excludedNodes, dfsClient, src,
-        currentBlockGroup, fileId, favoredNodes);
+        currentBlockGroup, fileId, favoredNodes, getAddBlockFlags());
    assert lb.isStriped();
    if (lb.getLocations().length < numDataBlocks) {
      throw new IOException("Failed to get " + numDataBlocks

+ 10 - 6
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/DataStreamer.java

@@ -32,6 +32,7 @@ import java.net.Socket;
 import java.nio.channels.ClosedChannelException;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.HashSet;
 import java.util.LinkedList;
 import java.util.List;
@@ -412,13 +413,15 @@ class DataStreamer extends Daemon {

  protected final LoadingCache<DatanodeInfo, DatanodeInfo> excludedNodes;
  private final String[] favoredNodes;
+  private final EnumSet<AddBlockFlag> addBlockFlags;

  private DataStreamer(HdfsFileStatus stat, ExtendedBlock block,
                       DFSClient dfsClient, String src,
                       Progressable progress, DataChecksum checksum,
                       AtomicReference<CachingStrategy> cachingStrategy,
                       ByteArrayManager byteArrayManage,
-                       boolean isAppend, String[] favoredNodes) {
+                       boolean isAppend, String[] favoredNodes,
+                       EnumSet<AddBlockFlag> flags) {
    this.block = block;
    this.dfsClient = dfsClient;
    this.src = src;
@@ -430,11 +433,11 @@ class DataStreamer extends Daemon {
    this.isLazyPersistFile = isLazyPersist(stat);
    this.isAppend = isAppend;
    this.favoredNodes = favoredNodes;
-
    final DfsClientConf conf = dfsClient.getConf();
    this.dfsclientSlowLogThresholdMs = conf.getSlowIoWarningThresholdMs();
    this.excludedNodes = initExcludedNodes(conf.getExcludedNodesCacheExpiry());
    this.errorState = new ErrorState(conf.getDatanodeRestartTimeout());
+    this.addBlockFlags = flags;
  }

  /**
@@ -443,9 +446,10 @@ class DataStreamer extends Daemon {
  DataStreamer(HdfsFileStatus stat, ExtendedBlock block, DFSClient dfsClient,
               String src, Progressable progress, DataChecksum checksum,
               AtomicReference<CachingStrategy> cachingStrategy,
-               ByteArrayManager byteArrayManage, String[] favoredNodes) {
+               ByteArrayManager byteArrayManage, String[] favoredNodes,
+               EnumSet<AddBlockFlag> flags) {
    this(stat, block, dfsClient, src, progress, checksum, cachingStrategy,
-        byteArrayManage, false, favoredNodes);
+        byteArrayManage, false, favoredNodes, flags);
    stage = BlockConstructionStage.PIPELINE_SETUP_CREATE;
  }

@@ -459,7 +463,7 @@ class DataStreamer extends Daemon {
               AtomicReference<CachingStrategy> cachingStrategy,
               ByteArrayManager byteArrayManage) {
    this(stat, lastBlock.getBlock(), dfsClient, src, progress, checksum, cachingStrategy,
-        byteArrayManage, true, null);
+        byteArrayManage, true, null, null);
    stage = BlockConstructionStage.PIPELINE_SETUP_APPEND;
    bytesSent = block.getNumBytes();
    accessToken = lastBlock.getBlockToken();
@@ -1679,7 +1683,7 @@ class DataStreamer extends Daemon {
  private LocatedBlock locateFollowingBlock(DatanodeInfo[] excludedNodes)
      throws IOException {
    return DFSOutputStream.addBlock(excludedNodes, dfsClient, src, block,
-        stat.getFileId(), favoredNodes);
+        stat.getFileId(), favoredNodes, addBlockFlags);
  }

  /**

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/StripedDataStreamer.java

@@ -19,6 +19,7 @@
 package org.apache.hadoop.hdfs;

 import java.io.IOException;
+import java.util.EnumSet;
 import java.util.concurrent.atomic.AtomicReference;

 import org.apache.hadoop.classification.InterfaceAudience;
@@ -52,9 +53,10 @@ public class StripedDataStreamer extends DataStreamer {
                      Progressable progress, DataChecksum checksum,
                      AtomicReference<CachingStrategy> cachingStrategy,
                      ByteArrayManager byteArrayManage, String[] favoredNodes,
-                      short index, Coordinator coordinator) {
+                      short index, Coordinator coordinator,
+                      final EnumSet<AddBlockFlag> flags) {
    super(stat, null, dfsClient, src, progress, checksum, cachingStrategy,
-        byteArrayManage, favoredNodes);
+        byteArrayManage, favoredNodes, flags);
    this.index = index;
    this.coordinator = coordinator;
  }

+ 4 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -25,6 +25,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -390,6 +391,8 @@ public interface ClientProtocol {
   * @param fileId the id uniquely identifying a file
   * @param favoredNodes the list of nodes where the client wants the blocks.
   *          Nodes are identified by either host name or address.
+   * @param addBlockFlags flags to advise the behavior of allocating and placing
+   *                      a new block.
   *
   * @return LocatedBlock allocated block information.
   *
@@ -408,7 +411,7 @@ public interface ClientProtocol {
  @Idempotent
  LocatedBlock addBlock(String src, String clientName,
      ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
-      String[] favoredNodes)
+      String[] favoredNodes, EnumSet<AddBlockFlag> addBlockFlags)
      throws IOException;

  /**

+ 6 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolTranslatorPB.java

@@ -404,7 +404,8 @@ public class ClientNamenodeProtocolTranslatorPB implements
  @Override
  public LocatedBlock addBlock(String src, String clientName,
      ExtendedBlock previous, DatanodeInfo[] excludeNodes, long fileId,
-      String[] favoredNodes) throws IOException {
+      String[] favoredNodes, EnumSet<AddBlockFlag> addBlockFlags)
+      throws IOException {
    AddBlockRequestProto.Builder req = AddBlockRequestProto.newBuilder()
        .setSrc(src).setClientName(clientName).setFileId(fileId);
    if (previous != null)
@@ -414,6 +415,10 @@ public class ClientNamenodeProtocolTranslatorPB implements
    if (favoredNodes != null) {
      req.addAllFavoredNodes(Arrays.asList(favoredNodes));
    }
+    if (addBlockFlags != null) {
+      req.addAllFlags(PBHelperClient.convertAddBlockFlags(
+          addBlockFlags));
+    }
    try {
      return PBHelperClient.convertLocatedBlockProto(
          rpcProxy.addBlock(null, req.build()).getBlock());

+ 27 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocolPB/PBHelperClient.java

@@ -37,6 +37,7 @@ import com.google.protobuf.CodedInputStream;
 import org.apache.hadoop.crypto.CipherOption;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
@@ -97,6 +98,7 @@ import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.AclEntryTyp
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclEntryProto.FsActionProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.AclStatusProto;
 import org.apache.hadoop.hdfs.protocol.proto.AclProtos.GetAclStatusResponseProto;
+import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveEntryProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoExpirationProto;
 import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.CacheDirectiveInfoProto;
@@ -2528,4 +2530,29 @@ public class PBHelperClient {
    }
    return builder.build();
  }
+
+  public static EnumSet<AddBlockFlag> convertAddBlockFlags(
+      List<AddBlockFlagProto> addBlockFlags) {
+    EnumSet<AddBlockFlag> flags =
+        EnumSet.noneOf(AddBlockFlag.class);
+    for (AddBlockFlagProto af : addBlockFlags) {
+      AddBlockFlag flag = AddBlockFlag.valueOf((short)af.getNumber());
+      if (flag != null) {
+        flags.add(flag);
+      }
+    }
+    return flags;
+  }
+
+  public static List<AddBlockFlagProto> convertAddBlockFlags(
+      EnumSet<AddBlockFlag> flags) {
+    List<AddBlockFlagProto> ret = new ArrayList<>();
+    for (AddBlockFlag flag : flags) {
+      AddBlockFlagProto abfp = AddBlockFlagProto.valueOf(flag.getMode());
+      if (abfp != null) {
+        ret.add(abfp);
+      }
+    }
+    return ret;
+  }
 }

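The two helpers added above translate between the client-side EnumSet and the repeated AddBlockFlagProto field that the .proto change below adds to AddBlockRequestProto; values that do not map to a known flag are dropped on either side. A sketch of the round trip using only the APIs introduced by this commit:

    import java.util.EnumSet;
    import java.util.List;

    import org.apache.hadoop.hdfs.AddBlockFlag;
    import org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos.AddBlockFlagProto;
    import org.apache.hadoop.hdfs.protocolPB.PBHelperClient;

    public class AddBlockFlagConversionExample {
      public static void main(String[] args) {
        EnumSet<AddBlockFlag> flags = EnumSet.of(AddBlockFlag.NO_LOCAL_WRITE);

        // Client side: the enum set becomes the repeated 'flags' field of AddBlockRequestProto.
        List<AddBlockFlagProto> onTheWire = PBHelperClient.convertAddBlockFlags(flags);

        // NameNode side: the repeated field is decoded back into an EnumSet.
        EnumSet<AddBlockFlag> decoded = PBHelperClient.convertAddBlockFlags(onTheWire);

        System.out.println(onTheWire + " -> " + decoded);
      }
    }
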
+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/proto/ClientNamenodeProtocol.proto

@@ -162,6 +162,10 @@ message AbandonBlockRequestProto {
 message AbandonBlockResponseProto { // void response
 }

+enum AddBlockFlagProto {
+  NO_LOCAL_WRITE = 1; // avoid writing to local node.
+}
+
 message AddBlockRequestProto {
   required string src = 1;
   required string clientName = 2;
@@ -169,6 +173,7 @@ message AddBlockRequestProto {
   repeated DatanodeInfoProto excludeNodes = 4;
   optional uint64 fileId = 5 [default = 0];  // default as a bogus id
   repeated string favoredNodes = 6; //the set of datanodes to use for the block
+  repeated AddBlockFlagProto flags = 7; // default to empty.
 }

 message AddBlockResponseProto {

+ 5 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/protocolPB/ClientNamenodeProtocolServerSideTranslatorPB.java

@@ -24,6 +24,7 @@ import java.util.List;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FsServerDefaults;
@@ -505,6 +506,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
    try {
      List<DatanodeInfoProto> excl = req.getExcludeNodesList();
      List<String> favor = req.getFavoredNodesList();
+      EnumSet<AddBlockFlag> flags =
+          PBHelperClient.convertAddBlockFlags(req.getFlagsList());
      LocatedBlock result = server.addBlock(
          req.getSrc(),
          req.getClientName(),
@@ -512,7 +515,8 @@ public class ClientNamenodeProtocolServerSideTranslatorPB implements
          (excl == null || excl.size() == 0) ? null : PBHelperClient.convert(excl
              .toArray(new DatanodeInfoProto[excl.size()])), req.getFileId(),
          (favor == null || favor.size() == 0) ? null : favor
-              .toArray(new String[favor.size()]));
+              .toArray(new String[favor.size()]),
+          flags);
      return AddBlockResponseProto.newBuilder()
          .setBlock(PBHelperClient.convertLocatedBlock(result)).build();
    } catch (IOException e) {

+ 6 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -48,6 +48,7 @@ import javax.management.ObjectName;
 import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.FileEncryptionInfo;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSUtilClient;
@@ -1782,7 +1783,7 @@ public class BlockManager implements BlockStatsMXBean {
      DatanodeDescriptor clientnode, Set<Node> excludes, long blocksize) {
    return placementPolicies.getPolicy(false).chooseTarget(src, 1, clientnode,
        Collections.<DatanodeStorageInfo>emptyList(), false, excludes,
-        blocksize, storagePolicySuite.getDefaultPolicy());
+        blocksize, storagePolicySuite.getDefaultPolicy(), null);
  }

  /** Choose target for getting additional datanodes for an existing pipeline. */
@@ -1797,7 +1798,7 @@ public class BlockManager implements BlockStatsMXBean {
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement = placementPolicies.getPolicy(isStriped);
    return blockplacement.chooseTarget(src, numAdditionalNodes, clientnode,
-        chosen, true, excludes, blocksize, storagePolicy);
+        chosen, true, excludes, blocksize, storagePolicy, null);
  }

  /**
@@ -1814,14 +1815,15 @@ public class BlockManager implements BlockStatsMXBean {
      final long blocksize,
      final List<String> favoredNodes,
      final byte storagePolicyID,
-      final boolean isStriped) throws IOException {
+      final boolean isStriped,
+      final EnumSet<AddBlockFlag> flags) throws IOException {
    List<DatanodeDescriptor> favoredDatanodeDescriptors =
        getDatanodeDescriptors(favoredNodes);
    final BlockStoragePolicy storagePolicy = storagePolicySuite.getPolicy(storagePolicyID);
    final BlockPlacementPolicy blockplacement = placementPolicies.getPolicy(isStriped);
    final DatanodeStorageInfo[] targets = blockplacement.chooseTarget(src,
        numOfReplicas, client, excludedNodes, blocksize,
-        favoredDatanodeDescriptors, storagePolicy);
+        favoredDatanodeDescriptors, storagePolicy, flags);
    if (targets.length < minReplication) {
      throw new IOException("File " + src + " could only be replicated to "
          + targets.length + " nodes instead of minReplication (="

+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicy.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;

 import java.util.ArrayList;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Map;
 import java.util.Set;
@@ -26,6 +27,7 @@ import java.util.Set;
 import com.google.common.base.Preconditions;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -64,6 +66,7 @@ public abstract class BlockPlacementPolicy {
   * @param returnChosenNodes decide if the chosenNodes are returned.
   * @param excludedNodes datanodes that should not be considered as targets.
   * @param blocksize size of the data to be written.
+   * @param flags Block placement flags.
   * @return array of DatanodeDescriptor instances chosen as target
   * and sorted as a pipeline.
   */
@@ -74,7 +77,8 @@ public abstract class BlockPlacementPolicy {
                                             boolean returnChosenNodes,
                                             Set<Node> excludedNodes,
                                             long blocksize,
-                                             BlockStoragePolicy storagePolicy);
+                                             BlockStoragePolicy storagePolicy,
+                                             EnumSet<AddBlockFlag> flags);

  /**
   * @param favoredNodes datanodes that should be favored as targets. This
@@ -86,14 +90,15 @@ public abstract class BlockPlacementPolicy {
      Set<Node> excludedNodes,
      long blocksize,
      List<DatanodeDescriptor> favoredNodes,
-      BlockStoragePolicy storagePolicy) {
+      BlockStoragePolicy storagePolicy,
+      EnumSet<AddBlockFlag> flags) {
    // This class does not provide the functionality of placing
    // a block in favored datanodes. The implementations of this class
    // are expected to provide this functionality

    return chooseTarget(src, numOfReplicas, writer,
        new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-        excludedNodes, blocksize, storagePolicy);
+        excludedNodes, blocksize, storagePolicy, flags);
  }

  /**

+ 40 - 11
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyDefault.java

@@ -23,6 +23,7 @@ import java.util.*;

 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -111,9 +112,10 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                    boolean returnChosenNodes,
                                    Set<Node> excludedNodes,
                                    long blocksize,
-                                    final BlockStoragePolicy storagePolicy) {
+                                    final BlockStoragePolicy storagePolicy,
+                                    EnumSet<AddBlockFlag> flags) {
    return chooseTarget(numOfReplicas, writer, chosenNodes, returnChosenNodes,
-        excludedNodes, blocksize, storagePolicy);
+        excludedNodes, blocksize, storagePolicy, flags);
  }

  @Override
@@ -123,13 +125,14 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      Set<Node> excludedNodes,
      long blocksize,
      List<DatanodeDescriptor> favoredNodes,
-      BlockStoragePolicy storagePolicy) {
+      BlockStoragePolicy storagePolicy,
+      EnumSet<AddBlockFlag> flags) {
    try {
      if (favoredNodes == null || favoredNodes.size() == 0) {
        // Favored nodes not specified, fall back to regular block placement.
        return chooseTarget(src, numOfReplicas, writer,
            new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-            excludedNodes, blocksize, storagePolicy);
+            excludedNodes, blocksize, storagePolicy, flags);
      }

      Set<Node> favoriteAndExcludedNodes = excludedNodes == null ?
@@ -164,7 +167,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
        DatanodeStorageInfo[] remainingTargets =
            chooseTarget(src, numOfReplicas, writer,
                new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-                favoriteAndExcludedNodes, blocksize, storagePolicy);
+                favoriteAndExcludedNodes, blocksize, storagePolicy, flags);
        for (int i = 0; i < remainingTargets.length; i++) {
          results.add(remainingTargets[i]);
        }
@@ -179,7 +182,7 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
      // Fall back to regular block placement disregarding favored nodes hint
      return chooseTarget(src, numOfReplicas, writer,
          new ArrayList<DatanodeStorageInfo>(numOfReplicas), false,
-          excludedNodes, blocksize, storagePolicy);
+          excludedNodes, blocksize, storagePolicy, flags);
    }
  }

@@ -213,7 +216,8 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
                                    boolean returnChosenNodes,
                                    Set<Node> excludedNodes,
                                    long blocksize,
-                                    final BlockStoragePolicy storagePolicy) {
+                                    final BlockStoragePolicy storagePolicy,
+                                    EnumSet<AddBlockFlag> addBlockFlags) {
    if (numOfReplicas == 0 || clusterMap.getNumOfLeaves()==0) {
      return DatanodeStorageInfo.EMPTY_ARRAY;
    }
@@ -226,17 +230,42 @@ public class BlockPlacementPolicyDefault extends BlockPlacementPolicy {
    numOfReplicas = result[0];
    int maxNodesPerRack = result[1];

-    final List<DatanodeStorageInfo> results = new ArrayList<>(chosenStorage);
    for (DatanodeStorageInfo storage : chosenStorage) {
      // add localMachine and related nodes to excludedNodes
      addToExcludedNodes(storage.getDatanodeDescriptor(), excludedNodes);
    }

+    List<DatanodeStorageInfo> results = null;
+    Node localNode = null;
    boolean avoidStaleNodes = (stats != null
        && stats.isAvoidingStaleDataNodesForWrite());
-    final Node localNode = chooseTarget(numOfReplicas, writer, excludedNodes,
-        blocksize, maxNodesPerRack, results, avoidStaleNodes, storagePolicy,
-        EnumSet.noneOf(StorageType.class), results.isEmpty());
+    boolean avoidLocalNode = (addBlockFlags != null
+        && addBlockFlags.contains(AddBlockFlag.NO_LOCAL_WRITE)
+        && writer != null
+        && !excludedNodes.contains(writer));
+    // Attempt to exclude local node if the client suggests so. If no enough
+    // nodes can be obtained, it falls back to the default block placement
+    // policy.
+    if (avoidLocalNode) {
+      results = new ArrayList<>(chosenStorage);
+      Set<Node> excludedNodeCopy = new HashSet<>(excludedNodes);
+      excludedNodeCopy.add(writer);
+      localNode = chooseTarget(numOfReplicas, writer,
+          excludedNodeCopy, blocksize, maxNodesPerRack, results,
+          avoidStaleNodes, storagePolicy,
+          EnumSet.noneOf(StorageType.class), results.isEmpty());
+      if (results.size() < numOfReplicas) {
+        // not enough nodes; discard results and fall back
+        results = null;
+      }
+    }
+    if (results == null) {
+      results = new ArrayList<>(chosenStorage);
+      localNode = chooseTarget(numOfReplicas, writer, excludedNodes,
+          blocksize, maxNodesPerRack, results, avoidStaleNodes,
+          storagePolicy, EnumSet.noneOf(StorageType.class), results.isEmpty());
+    }
+
    if (!returnChosenNodes) {
      results.removeAll(chosenStorage);
    }

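The placement change above boils down to a two-pass scheme: when NO_LOCAL_WRITE is present, the writer's node is first added to a copy of the excluded set; if that pass cannot produce the requested number of targets, the partial result is discarded and placement reruns with the original exclusions, so the hint can never fail an allocation that would otherwise succeed. A self-contained toy model of that control flow, with string node names and a trivial pickTargets() standing in for the real recursive chooseTarget() (purely illustrative, not HDFS code):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.HashSet;
    import java.util.LinkedHashSet;
    import java.util.List;
    import java.util.Set;

    public class NoLocalWritePlacementSketch {

      // Stand-in for the real recursive chooseTarget(); may return fewer than n targets.
      static List<String> pickTargets(int n, Set<String> live, Set<String> excluded) {
        List<String> targets = new ArrayList<>();
        for (String node : live) {
          if (targets.size() == n) {
            break;
          }
          if (!excluded.contains(node)) {
            targets.add(node);
          }
        }
        return targets;
      }

      static List<String> choose(int n, String writer, Set<String> live,
          Set<String> excluded, boolean noLocalWrite) {
        if (noLocalWrite && writer != null && !excluded.contains(writer)) {
          Set<String> excludedPlusWriter = new HashSet<>(excluded);
          excludedPlusWriter.add(writer); // first pass: skip the writer's own node
          List<String> targets = pickTargets(n, live, excludedPlusWriter);
          if (targets.size() >= n) {
            return targets; // hint satisfied
          }
          // Not enough remote nodes: discard the partial result and fall back,
          // like the "results = null" path in BlockPlacementPolicyDefault above.
        }
        return pickTargets(n, live, excluded); // default placement, writer allowed
      }

      public static void main(String[] args) {
        Set<String> live = new LinkedHashSet<>(Arrays.asList("dn1", "dn2", "dn3", "dn4"));
        System.out.println(choose(3, "dn1", live, new HashSet<>(), true)); // [dn2, dn3, dn4]
        System.out.println(choose(4, "dn1", live, new HashSet<>(), true)); // falls back: [dn1, dn2, dn3, dn4]
      }
    }
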
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java

@@ -61,7 +61,7 @@ class ErasureCodingWork extends BlockReconstructionWork {
        getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
        getLiveReplicaStorages(), false, excludedNodes,
        getBlock().getNumBytes(),
-        storagePolicySuite.getPolicy(getBc().getStoragePolicyID()));
+        storagePolicySuite.getPolicy(getBc().getStoragePolicyID()), null);
    setTargets(chosenTargets);
  }


+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java

@@ -47,7 +47,8 @@ class ReplicationWork extends BlockReconstructionWork {
          getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
          getLiveReplicaStorages(), false, excludedNodes,
          getBlock().getNumBytes(),
-          storagePolicySuite.getPolicy(getBc().getStoragePolicyID()));
+          storagePolicySuite.getPolicy(getBc().getStoragePolicyID()),
+          null);
      setTargets(chosenTargets);
    } finally {
      getSrcNodes()[0].decrementPendingReplicationWithoutTargets();

+ 5 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSDirWriteFileOp.java

@@ -23,6 +23,7 @@ import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.crypto.CipherSuite;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileAlreadyExistsException;
 import org.apache.hadoop.fs.FileEncryptionInfo;
@@ -282,8 +283,9 @@ class FSDirWriteFileOp {
  }

  static DatanodeStorageInfo[] chooseTargetForNewBlock(
-      BlockManager bm, String src, DatanodeInfo[] excludedNodes, String[]
-      favoredNodes, ValidateAddBlockResult r) throws IOException {
+      BlockManager bm, String src, DatanodeInfo[] excludedNodes,
+      String[] favoredNodes, EnumSet<AddBlockFlag> flags,
+      ValidateAddBlockResult r) throws IOException {
    Node clientNode = bm.getDatanodeManager()
        .getDatanodeByHost(r.clientMachine);
    if (clientNode == null) {
@@ -297,12 +299,11 @@ class FSDirWriteFileOp {
    }
    List<String> favoredNodesList = (favoredNodes == null) ? null
        : Arrays.asList(favoredNodes);
-
    // choose targets for the new block to be allocated.
    return bm.chooseTarget4NewBlock(src, r.numTargets, clientNode,
                                    excludedNodesSet, r.blockSize,
                                    favoredNodesList, r.storagePolicyID,
-                                    r.isStriped);
+                                    r.isStriped, flags);
  }

  /**

+ 4 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -136,6 +136,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.crypto.key.KeyProvider.Metadata;
 import org.apache.hadoop.crypto.key.KeyProviderCryptoExtension;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedListEntries;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.ContentSummary;
@@ -2468,7 +2469,8 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
   */
  LocatedBlock getAdditionalBlock(
      String src, long fileId, String clientName, ExtendedBlock previous,
-      DatanodeInfo[] excludedNodes, String[] favoredNodes) throws IOException {
+      DatanodeInfo[] excludedNodes, String[] favoredNodes,
+      EnumSet<AddBlockFlag> flags) throws IOException {
    NameNode.stateChangeLog.debug("BLOCK* getAdditionalBlock: {}  inodeId {}" +
        " for {}", src, fileId, clientName);

@@ -2492,7 +2494,7 @@ public class FSNamesystem implements Namesystem, FSNamesystemMBean,
    }

    DatanodeStorageInfo[] targets = FSDirWriteFileOp.chooseTargetForNewBlock(
-        blockManager, src, excludedNodes, favoredNodes, r);
+        blockManager, src, excludedNodes, favoredNodes, flags, r);

    checkOperation(OperationCategory.WRITE);
    writeLock();

+ 4 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NameNodeRpcServer.java

@@ -48,6 +48,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.ReconfigurationTaskStatus;
 import org.apache.hadoop.crypto.CryptoProtocolVersion;
 import org.apache.hadoop.fs.BatchedRemoteIterator.BatchedEntries;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.CacheFlag;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.ContentSummary;
@@ -837,11 +838,11 @@ class NameNodeRpcServer implements NamenodeProtocols {
   @Override
   public LocatedBlock addBlock(String src, String clientName,
       ExtendedBlock previous, DatanodeInfo[] excludedNodes, long fileId,
-      String[] favoredNodes)
+      String[] favoredNodes, EnumSet<AddBlockFlag> addBlockFlags)
       throws IOException {
     checkNNStartup();
     LocatedBlock locatedBlock = namesystem.getAdditionalBlock(src, fileId,
-        clientName, previous, excludedNodes, favoredNodes);
+        clientName, previous, excludedNodes, favoredNodes, addBlockFlags);
     if (locatedBlock != null) {
       metrics.incrAddBlockOps();
     }
@@ -1149,7 +1150,7 @@ class NameNodeRpcServer implements NamenodeProtocols {
     DatanodeInfo results[] = namesystem.datanodeReport(type);
     return results;
   }
-    
+
   @Override // ClientProtocol
   public DatanodeStorageReport[] getDatanodeStorageReport(
       DatanodeReportType type) throws IOException {

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -1967,7 +1967,7 @@ public class DFSTestUtil {
       String clientName, ExtendedBlock previous, int numStripes, int len)
       throws Exception {
     fs.getClient().namenode.addBlock(file, clientName, previous, null,
-        fileNode.getId(), null);
+        fileNode.getId(), null, null);
 
     final BlockInfo lastBlock = fileNode.getLastBlock();
     final int groupSize = fileNode.getPreferredBlockReplication();

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestBlockStoragePolicy.java

@@ -1237,12 +1237,12 @@ public class TestBlockStoragePolicy {
 
     DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
-        new HashSet<Node>(), 0, policy1);
+        new HashSet<Node>(), 0, policy1, null);
     System.out.println(Arrays.asList(targets));
     Assert.assertEquals(3, targets.length);
     targets = replicator.chooseTarget("/foo", 3,
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
-        new HashSet<Node>(), 0, policy2);
+        new HashSet<Node>(), 0, policy2, null);
     System.out.println(Arrays.asList(targets));
     Assert.assertEquals(3, targets.length);
   }
@@ -1284,7 +1284,7 @@ public class TestBlockStoragePolicy {
 
     DatanodeStorageInfo[] targets = replicator.chooseTarget("/foo", 3,
         dataNodes[0], Collections.<DatanodeStorageInfo>emptyList(), false,
-        new HashSet<Node>(), 0, policy);
+        new HashSet<Node>(), 0, policy, null);
     System.out.println(policy.getName() + ": " + Arrays.asList(targets));
     Assert.assertEquals(2, targets.length);
     Assert.assertEquals(StorageType.SSD, targets[0].getStorageType());

+ 9 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -44,6 +44,7 @@ import java.net.URI;
 import java.security.MessageDigest;
 import java.util.ArrayList;
 import java.util.Arrays;
+import java.util.EnumSet;
 import java.util.List;
 import java.util.Random;
 import java.util.concurrent.TimeUnit;
@@ -96,6 +97,7 @@ import org.apache.log4j.Level;
 import org.junit.Assert;
 import org.junit.Before;
 import org.junit.Test;
+import org.mockito.Matchers;
 import org.mockito.Mockito;
 import org.mockito.internal.stubbing.answers.ThrowsException;
 import org.mockito.invocation.InvocationOnMock;
@@ -252,7 +254,9 @@ public class TestDFSClientRetries {
                          anyString(),
                          any(ExtendedBlock.class),
                          any(DatanodeInfo[].class),
-                         anyLong(), any(String[].class))).thenAnswer(answer);
+                         anyLong(), any(String[].class),
+                         Matchers.<EnumSet<AddBlockFlag>>any()))
+        .thenAnswer(answer);
     
     Mockito.doReturn(
             new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
@@ -471,7 +475,8 @@ public class TestDFSClientRetries {
         }
       }).when(spyNN).addBlock(Mockito.anyString(), Mockito.anyString(),
           Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
-          Mockito.anyLong(), Mockito.<String[]> any());
+          Mockito.anyLong(), Mockito.<String[]> any(),
+          Mockito.<EnumSet<AddBlockFlag>> any());
 
       doAnswer(new Answer<Boolean>() {
 
@@ -513,7 +518,8 @@ public class TestDFSClientRetries {
       Mockito.verify(spyNN, Mockito.atLeastOnce()).addBlock(
           Mockito.anyString(), Mockito.anyString(),
           Mockito.<ExtendedBlock> any(), Mockito.<DatanodeInfo[]> any(),
-          Mockito.anyLong(), Mockito.<String[]> any());
+          Mockito.anyLong(), Mockito.<String[]> any(),
+          Mockito.<EnumSet<AddBlockFlag>> any());
       Mockito.verify(spyNN, Mockito.atLeastOnce()).complete(
           Mockito.anyString(), Mockito.anyString(),
           Mockito.<ExtendedBlock>any(), anyLong());

+ 58 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDFSOutputStream.java

@@ -22,19 +22,29 @@ import java.io.IOException;
 import java.lang.reflect.Field;
 import java.lang.reflect.Method;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.LinkedList;
-import java.util.concurrent.atomic.AtomicReference;
+import java.util.Map;
+import java.util.Random;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FsTracer;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DataStreamer.LastExceptionInStreamer;
 import org.apache.hadoop.hdfs.client.impl.DfsClientConf;
+import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.HdfsConstants;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.datatransfer.BlockConstructionStage;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
+import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeStorage;
 import org.apache.htrace.core.SpanId;
 import org.junit.AfterClass;
 import org.junit.Assert;
@@ -42,8 +52,11 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.mockito.internal.util.reflection.Whitebox;
 
+import static org.junit.Assert.assertEquals;
+import static org.mockito.Mockito.doReturn;
 import static org.mockito.Mockito.mock;
 import static org.mockito.Mockito.doThrow;
+import static org.mockito.Mockito.spy;
 import static org.mockito.Mockito.when;
 
 public class TestDFSOutputStream {
@@ -52,7 +65,7 @@ public class TestDFSOutputStream {
   @BeforeClass
   public static void setup() throws IOException {
     Configuration conf = new Configuration();
-    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
   }
 
   /**
@@ -80,7 +93,7 @@ public class TestDFSOutputStream {
     try {
       dos.close();
     } catch (IOException e) {
-      Assert.assertEquals(e, dummy);
+      assertEquals(e, dummy);
     }
     thrown = (Throwable) Whitebox.getInternalState(ex, "thrown");
     Assert.assertNull(thrown);
@@ -127,7 +140,7 @@ public class TestDFSOutputStream {
         mock(HdfsFileStatus.class),
         mock(ExtendedBlock.class),
         client,
-        "foo", null, null, null, null, null);
+        "foo", null, null, null, null, null, null);
 
     DataOutputStream blockStream = mock(DataOutputStream.class);
     doThrow(new IOException()).when(blockStream).flush();
@@ -148,6 +161,47 @@ public class TestDFSOutputStream {
     Assert.assertTrue(congestedNodes.isEmpty());
   }
 
+  @Test
+  public void testNoLocalWriteFlag() throws IOException {
+    DistributedFileSystem fs = cluster.getFileSystem();
+    EnumSet<CreateFlag> flags = EnumSet.of(CreateFlag.NO_LOCAL_WRITE,
+        CreateFlag.CREATE);
+    BlockManager bm = cluster.getNameNode().getNamesystem().getBlockManager();
+    DatanodeManager dm = bm.getDatanodeManager();
+    try(FSDataOutputStream os = fs.create(new Path("/test-no-local"),
+        FsPermission.getDefault(),
+        flags, 512, (short)2, 512, null)) {
+      // Inject a DatanodeManager that returns one DataNode as local node for
+      // the client.
+      DatanodeManager spyDm = spy(dm);
+      DatanodeDescriptor dn1 = dm.getDatanodeListForReport
+          (HdfsConstants.DatanodeReportType.LIVE).get(0);
+      doReturn(dn1).when(spyDm).getDatanodeByHost("127.0.0.1");
+      Whitebox.setInternalState(bm, "datanodeManager", spyDm);
+      byte[] buf = new byte[512 * 16];
+      new Random().nextBytes(buf);
+      os.write(buf);
+    } finally {
+      Whitebox.setInternalState(bm, "datanodeManager", dm);
+    }
+    cluster.triggerBlockReports();
+    final String bpid = cluster.getNamesystem().getBlockPoolId();
+    // Total number of DataNodes is 3.
+    assertEquals(3, cluster.getAllBlockReports(bpid).size());
+    int numDataNodesWithData = 0;
+    for (Map<DatanodeStorage, BlockListAsLongs> dnBlocks :
+        cluster.getAllBlockReports(bpid)) {
+      for (BlockListAsLongs blocks : dnBlocks.values()) {
+        if (blocks.getNumberOfBlocks() > 0) {
+          numDataNodesWithData++;
+          break;
+        }
+      }
+    }
+    // Verify that only one DN has no data.
+    assertEquals(1, 3 - numDataNodesWithData);
+  }
+
   @AfterClass
   public static void tearDown() {
     if (cluster != null) {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -531,7 +531,7 @@ public class TestFileCreation {
 
       // add one block to the file
       LocatedBlock location = client.getNamenode().addBlock(file1.toString(),
-          client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
+          client.clientName, null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
       System.out.println("testFileCreationError2: "
           + "Added block " + location.getBlock());
 
@@ -582,7 +582,7 @@ public class TestFileCreation {
       createFile(dfs, f, 3);
       try {
         cluster.getNameNodeRpc().addBlock(f.toString(), client.clientName,
-            null, null, HdfsConstants.GRANDFATHER_INODE_ID, null);
+            null, null, HdfsConstants.GRANDFATHER_INODE_ID, null, null);
         fail();
       } catch(IOException ioe) {
         FileSystem.LOG.info("GOOD!", ioe);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/BaseReplicationPolicyTest.java

@@ -156,6 +156,6 @@ abstract public class BaseReplicationPolicyTest {
      Set<Node> excludedNodes) {
     return replicator.chooseTarget(filename, numOfReplicas, writer,
         chosenNodes, false, excludedNodes, BLOCK_SIZE,
-        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
   }
 }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestAvailableSpaceBlockPlacementPolicy.java

@@ -142,7 +142,7 @@ public class TestAvailableSpaceBlockPlacementPolicy {
               .getBlockManager()
               .getBlockPlacementPolicy()
               .chooseTarget(file, replica, null, new ArrayList<DatanodeStorageInfo>(), false, null,
-                blockSize, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+                blockSize, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
 
       Assert.assertTrue(targets.length == replica);
       for (int j = 0; j < replica; j++) {

+ 43 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertNotEquals;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
@@ -30,6 +31,7 @@ import java.io.IOException;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.Collection;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.HashSet;
 import java.util.List;
@@ -38,6 +40,7 @@ import java.util.Set;
 import java.util.concurrent.ThreadLocalRandom;
 
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.StorageType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -290,7 +293,8 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
     excludedNodes.add(dataNodes[1]); 
     chosenNodes.add(storages[2]);
     targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
-        excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+        excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY,
+        null);
     System.out.println("targets=" + Arrays.asList(targets));
     assertEquals(2, targets.length);
     //make sure that the chosen node is in the target.
@@ -667,7 +671,8 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
           .getNamesystem().getBlockManager().getBlockPlacementPolicy();
       DatanodeStorageInfo[] targets = replicator.chooseTarget(filename, 3,
           staleNodeInfo, new ArrayList<DatanodeStorageInfo>(), false, null,
-          BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+          BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY,
+          null);
 
       assertEquals(targets.length, 3);
       assertFalse(isOnSameRack(targets[0], staleNodeInfo));
@@ -693,7 +698,7 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
       // Call chooseTarget
       targets = replicator.chooseTarget(filename, 3, staleNodeInfo,
           new ArrayList<DatanodeStorageInfo>(), false, null, BLOCK_SIZE,
-          TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+          TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
       assertEquals(targets.length, 3);
       assertTrue(isOnSameRack(targets[0], staleNodeInfo));
       
@@ -1501,8 +1506,42 @@ public class TestReplicationPolicy extends BaseReplicationPolicyTest {
   private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
       DatanodeDescriptor writer, Set<Node> excludedNodes,
       List<DatanodeDescriptor> favoredNodes) {
+    return chooseTarget(numOfReplicas, writer, excludedNodes,
+        favoredNodes, null);
+  }
+
+  private DatanodeStorageInfo[] chooseTarget(int numOfReplicas,
+    DatanodeDescriptor writer, Set<Node> excludedNodes,
+        List<DatanodeDescriptor> favoredNodes, EnumSet<AddBlockFlag> flags) {
     return replicator.chooseTarget(filename, numOfReplicas, writer,
         excludedNodes, BLOCK_SIZE, favoredNodes,
-        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, flags);
+  }
+
+  @Test
+  public void testAvoidLocalWrite() throws IOException {
+    DatanodeDescriptor writer = dataNodes[2];
+    EnumSet<AddBlockFlag> flags = EnumSet.of(AddBlockFlag.NO_LOCAL_WRITE);
+    DatanodeStorageInfo[] targets;
+    targets = chooseTarget(5, writer, null, null, flags);
+    for (DatanodeStorageInfo info : targets) {
+      assertNotEquals(info.getDatanodeDescriptor(), writer);
+    }
+  }
+
+  @Test
+  public void testAvoidLocalWriteNoEnoughNodes() throws IOException {
+    DatanodeDescriptor writer = dataNodes[2];
+    EnumSet<AddBlockFlag> flags = EnumSet.of(AddBlockFlag.NO_LOCAL_WRITE);
+    DatanodeStorageInfo[] targets;
+    targets = chooseTarget(6, writer, null, null, flags);
+    assertEquals(6, targets.length);
+    boolean found = false;
+    for (DatanodeStorageInfo info : targets) {
+      if (info.getDatanodeDescriptor().equals(writer)) {
+        found = true;
+      }
+    }
+    assertTrue(found);
   }
 }

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyConsiderLoad.java

@@ -112,7 +112,7 @@ public class TestReplicationPolicyConsiderLoad
       DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
           .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3,
               writerDn, new ArrayList<DatanodeStorageInfo>(), false, null,
-              1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+              1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
 
       assertEquals(3, targets.length);
       Set<DatanodeStorageInfo> targetSet = new HashSet<>(
@@ -171,7 +171,7 @@ public class TestReplicationPolicyConsiderLoad
       DatanodeStorageInfo[] targets = namenode.getNamesystem().getBlockManager()
           .getBlockPlacementPolicy().chooseTarget("testFile.txt", 3, writerDn,
               new ArrayList<DatanodeStorageInfo>(), false, null,
-              1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+              1024, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
       for(DatanodeStorageInfo info : targets) {
         assertTrue("The node "+info.getDatanodeDescriptor().getName()+
                 " has higher load and should not have been picked!",

+ 5 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithNodeGroup.java

@@ -269,7 +269,7 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
       List<DatanodeDescriptor> favoredNodes) {
     return replicator.chooseTarget(filename, numOfReplicas, writer,
       excludedNodes, BLOCK_SIZE, favoredNodes,
-      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+      TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
   }
 
   /**
@@ -351,7 +351,8 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
     Set<Node> excludedNodes = new HashSet<>();
     excludedNodes.add(dataNodes[1]);
     targets = repl.chooseTarget(filename, 4, dataNodes[0], chosenNodes, false, 
-        excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+        excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY,
+        null);
     assertEquals(targets.length, 4);
     assertEquals(storages[0], targets[0]);
 
@@ -369,7 +370,8 @@ public class TestReplicationPolicyWithNodeGroup extends BaseReplicationPolicyTes
     excludedNodes.add(dataNodes[1]); 
     chosenNodes.add(storages[2]);
     targets = repl.chooseTarget(filename, 1, dataNodes[0], chosenNodes, true,
-        excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+        excludedNodes, BLOCK_SIZE, TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY,
+        null);
     System.out.println("targets=" + Arrays.asList(targets));
     assertEquals(2, targets.length);
     //make sure that the chosen node is in the target.

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicyWithUpgradeDomain.java

@@ -187,7 +187,7 @@ public class TestReplicationPolicyWithUpgradeDomain
     chosenNodes.add(storages[2]);
     targets = replicator.chooseTarget(filename, 1, dataNodes[0], chosenNodes,
         true, excludedNodes, BLOCK_SIZE,
-        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY);
+        TestBlockStoragePolicy.DEFAULT_STORAGE_POLICY, null);
     System.out.println("targets=" + Arrays.asList(targets));
     assertEquals(2, targets.length);
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -1204,7 +1204,7 @@ public class NNThroughputBenchmark implements Tool {
       for (int i = 0; i < 30; i++) {
         try {
           return clientProto.addBlock(src, clientName,
-              previous, excludeNodes, fileId, favoredNodes);
+              previous, excludeNodes, fileId, favoredNodes, null);
         } catch (NotReplicatedYetException|RemoteException e) {
           if (e instanceof RemoteException) {
             String className = ((RemoteException) e).getClassName();

+ 4 - 4
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddBlockRetry.java

@@ -101,13 +101,13 @@ public class TestAddBlockRetry {
       ns.readUnlock();;
     }
     DatanodeStorageInfo targets[] = FSDirWriteFileOp.chooseTargetForNewBlock(
-        ns.getBlockManager(), src, null, null, r);
+        ns.getBlockManager(), src, null, null, null, r);
     assertNotNull("Targets must be generated", targets);
 
     // run second addBlock()
     LOG.info("Starting second addBlock for " + src);
     nn.addBlock(src, "clientName", null, null,
-                HdfsConstants.GRANDFATHER_INODE_ID, null);
+                HdfsConstants.GRANDFATHER_INODE_ID, null, null);
     assertTrue("Penultimate block must be complete",
                checkFileProgress(src, false));
     LocatedBlocks lbs = nn.getBlockLocations(src, 0, Long.MAX_VALUE);
@@ -161,14 +161,14 @@ public class TestAddBlockRetry {
     // start first addBlock()
     LOG.info("Starting first addBlock for " + src);
     LocatedBlock lb1 = nameNodeRpc.addBlock(src, "clientName", null, null,
-        HdfsConstants.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null, null);
     assertTrue("Block locations should be present",
         lb1.getLocations().length > 0);
 
     cluster.restartNameNode();
     nameNodeRpc = cluster.getNameNodeRpc();
     LocatedBlock lb2 = nameNodeRpc.addBlock(src, "clientName", null, null,
-        HdfsConstants.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null, null);
     assertEquals("Blocks are not equal", lb1.getBlock(), lb2.getBlock());
     assertTrue("Wrong locations with retry", lb2.getLocations().length > 0);
   }

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestAddStripedBlocks.java

@@ -285,7 +285,7 @@ public class TestAddStripedBlocks {
       FSDirectory fsdir = cluster.getNamesystem().getFSDirectory();
       INodeFile fileNode = fsdir.getINode4Write(file.toString()).asFile();
       cluster.getNamesystem().getAdditionalBlock(file.toString(),
-          fileNode.getId(), dfs.getClient().getClientName(), null, null, null);
+          fileNode.getId(), dfs.getClient().getClientName(), null, null, null, null);
       BlockInfo lastBlock = fileNode.getLastBlock();
 
       DatanodeStorageInfo[] locs = lastBlock.getUnderConstructionFeature()

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -113,7 +113,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
 
         //test chooseTarget for new file
         LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
-            null, null, fileStatus.getFileId(), null);
+            null, null, fileStatus.getFileId(), null, null);
         doTestLocatedBlock(replication, locatedBlock);
 
         //test chooseTarget for existing file.
@@ -143,7 +143,7 @@ public class TestBlockPlacementPolicyRackFaultTolerant {
 
     //test chooseTarget for new file
     LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
-        null, null, fileStatus.getFileId(), null);
+        null, null, fileStatus.getFileId(), null, null);
     doTestLocatedBlock(20, locatedBlock);
 
     DatanodeInfo[] locs = locatedBlock.getLocations();

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeadDatanode.java

@@ -166,7 +166,7 @@ public class TestDeadDatanode {
     // part of the cluster anymore
     DatanodeStorageInfo[] results = bm.chooseTarget4NewBlock("/hello", 3,
         clientNode, new HashSet<Node>(), 256 * 1024 * 1024L, null, (byte) 7,
-        false);
+        false, null);
     for (DatanodeStorageInfo datanodeStorageInfo : results) {
       assertFalse("Dead node should not be choosen", datanodeStorageInfo
           .getDatanodeDescriptor().equals(clientNode));

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDefaultBlockPlacementPolicy.java

@@ -140,7 +140,7 @@ public class TestDefaultBlockPlacementPolicy {
           clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
           REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
       LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
-          null, null, fileStatus.getFileId(), null);
+          null, null, fileStatus.getFileId(), null, null);
 
       assertEquals("Block should be allocated sufficient locations",
           REPLICATION_FACTOR, locatedBlock.getLocations().length);

+ 5 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestDeleteRace.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 import java.io.FileNotFoundException;
 import java.util.AbstractMap;
 import java.util.ArrayList;
+import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
@@ -28,6 +29,7 @@ import java.util.Set;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.AddBlockFlag;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -123,10 +125,11 @@ public class TestDeleteRace {
                                       boolean returnChosenNodes,
                                       Set<Node> excludedNodes,
                                       long blocksize,
-                                      final BlockStoragePolicy storagePolicy) {
+                                      final BlockStoragePolicy storagePolicy,
+                                      EnumSet<AddBlockFlag> flags) {
       DatanodeStorageInfo[] results = super.chooseTarget(srcPath,
           numOfReplicas, writer, chosenNodes, returnChosenNodes, excludedNodes,
-          blocksize, storagePolicy);
+          blocksize, storagePolicy, flags);
       try {
         Thread.sleep(3000);
       } catch (InterruptedException e) {}

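For downstream code, the signature change exercised above also applies to custom placement policies: any subclass of BlockPlacementPolicyDefault that overrides chooseTarget() must accept the additional EnumSet<AddBlockFlag> argument, as the test policy above does. A hypothetical, minimal subclass illustrating only the updated override signature (the class name and pass-through behavior are invented for illustration, not part of this patch):

    import java.util.EnumSet;
    import java.util.List;
    import java.util.Set;

    import org.apache.hadoop.hdfs.AddBlockFlag;
    import org.apache.hadoop.hdfs.protocol.BlockStoragePolicy;
    import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyDefault;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeStorageInfo;
    import org.apache.hadoop.net.Node;

    public class PassThroughPlacementPolicy extends BlockPlacementPolicyDefault {
      @Override
      public DatanodeStorageInfo[] chooseTarget(String srcPath, int numOfReplicas,
          Node writer, List<DatanodeStorageInfo> chosenNodes,
          boolean returnChosenNodes, Set<Node> excludedNodes, long blocksize,
          BlockStoragePolicy storagePolicy, EnumSet<AddBlockFlag> flags) {
        // flags may carry AddBlockFlag.NO_LOCAL_WRITE; per the tests above it is
        // a hint, and the writer node is still used when too few others exist.
        return super.chooseTarget(srcPath, numOfReplicas, writer, chosenNodes,
            returnChosenNodes, excludedNodes, blocksize, storagePolicy, flags);
      }
    }
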
+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestUpgradeDomainBlockPlacementPolicy.java

@@ -152,7 +152,7 @@ public class TestUpgradeDomainBlockPlacementPolicy {
           clientMachine, clientMachine, EnumSet.of(CreateFlag.CREATE), true,
           REPLICATION_FACTOR, DEFAULT_BLOCK_SIZE, null, false);
       LocatedBlock locatedBlock = nameNodeRpc.addBlock(src, clientMachine,
-          null, null, fileStatus.getFileId(), null);
+          null, null, fileStatus.getFileId(), null, null);
 
       assertEquals("Block should be allocated sufficient locations",
           REPLICATION_FACTOR, locatedBlock.getLocations().length);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestHASafeMode.java

@@ -837,7 +837,7 @@ public class TestHASafeMode {
           new ExtendedBlock(previousBlock),
           new DatanodeInfo[0],
           DFSClientAdapter.getFileId((DFSOutputStream) create
-              .getWrappedStream()), null);
+              .getWrappedStream()), null, null);
       cluster.restartNameNode(0, true);
       cluster.restartDataNode(0);
       cluster.transitionToActive(0);

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestOpenFilesWithSnapshot.java

@@ -198,7 +198,7 @@ public class TestOpenFilesWithSnapshot {
     String clientName = fs.getClient().getClientName();
     // create one empty block
     nameNodeRpc.addBlock(fileWithEmptyBlock.toString(), clientName, null, null,
-        HdfsConstants.GRANDFATHER_INODE_ID, null);
+        HdfsConstants.GRANDFATHER_INODE_ID, null, null);
     fs.createSnapshot(path, "s2");
 
     fs.rename(new Path("/test/test"), new Path("/test/test-renamed"));

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/snapshot/TestSnapshotBlocksMap.java

@@ -282,7 +282,7 @@ public class TestSnapshotBlocksMap {
     ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
     cluster.getNameNodeRpc()
         .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
-            null, barNode.getId(), null);
+            null, barNode.getId(), null, null);
 
     SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
 
@@ -319,7 +319,7 @@ public class TestSnapshotBlocksMap {
     ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
     cluster.getNameNodeRpc()
         .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
-            null, barNode.getId(), null);
+            null, barNode.getId(), null, null);
 
     SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");
 
@@ -358,7 +358,7 @@ public class TestSnapshotBlocksMap {
     ExtendedBlock previous = new ExtendedBlock(fsn.getBlockPoolId(), blks[0]);
     cluster.getNameNodeRpc()
         .addBlock(bar.toString(), hdfs.getClient().getClientName(), previous,
-            null, barNode.getId(), null);
+            null, barNode.getId(), null, null);
 
     SnapshotTestHelper.createSnapshot(hdfs, foo, "s1");