
HDFS-15469. Dynamically configure the size of PacketReceiver#MAX_PACKET_SIZE. (#2138)

jianghuazhu 4 years ago
parent
commit
e6d2dccbef

+ 6 - 0
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/client/HdfsClientConfigKeys.java

@@ -221,6 +221,12 @@ public interface HdfsClientConfigKeys {
       "dfs.encrypt.data.transfer.cipher.key.bitlength";
   int    DFS_ENCRYPT_DATA_TRANSFER_CIPHER_KEY_BITLENGTH_DEFAULT = 128;
 
+  public static final String
+          DFS_DATA_TRANSFER_MAX_PACKET_SIZE =
+          "dfs.data.transfer.max.packet.size";
+  public static final int DFS_DATA_TRANSFER_MAX_PACKET_SIZE_DEFAULT =
+          16 * 1024 * 1024;
+
   String DFS_TRUSTEDCHANNEL_RESOLVER_CLASS =
       "dfs.trustedchannel.resolver.class";
 

+ 11 - 1
hadoop-hdfs-project/hadoop-hdfs-client/src/main/java/org/apache/hadoop/hdfs/protocol/datatransfer/PacketReceiver.java

@@ -25,6 +25,9 @@ import java.nio.ByteBuffer;
 import java.nio.channels.ReadableByteChannel;
 
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.apache.hadoop.util.DirectBufferPool;
 import org.apache.hadoop.io.IOUtils;
 
@@ -45,7 +48,7 @@ public class PacketReceiver implements Closeable {
    * The max size of any single packet. This prevents OOMEs when
    * invalid data is sent.
    */
-  public static final int MAX_PACKET_SIZE = 16 * 1024 * 1024;
+  public static final int MAX_PACKET_SIZE;
 
   static final Logger LOG = LoggerFactory.getLogger(PacketReceiver.class);
 
@@ -74,6 +77,13 @@ public class PacketReceiver implements Closeable {
    */
   private PacketHeader curHeader;
 
+  static {
+    Configuration conf = new HdfsConfiguration();
+    MAX_PACKET_SIZE = conf.getInt(HdfsClientConfigKeys.
+                    DFS_DATA_TRANSFER_MAX_PACKET_SIZE,
+            HdfsClientConfigKeys.DFS_DATA_TRANSFER_MAX_PACKET_SIZE_DEFAULT);
+  }
+
   public PacketReceiver(boolean useDirectBuffers) {
     this.useDirectBuffers = useDirectBuffers;
     reallocPacketBuf(PacketHeader.PKT_LENGTHS_LEN);
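A minimal sketch of the kind of bounds check this limit backs (an illustration only, not the actual PacketReceiver.doRead() code; the class and method names below are made up for the example). A length value taken from the wire is validated against MAX_PACKET_SIZE before any buffer of that size is allocated, which is what prevents a corrupt or malicious header from forcing a huge allocation:

import java.io.IOException;
import java.nio.ByteBuffer;
import org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver;

class PacketLengthGuard {
  // Reject a wire-supplied length before allocating a buffer for it.
  static int checkedPacketLength(ByteBuffer lengthPrefix) throws IOException {
    int totalLen = lengthPrefix.getInt();
    if (totalLen < 0 || totalLen > PacketReceiver.MAX_PACKET_SIZE) {
      throw new IOException(
          "Incorrect value for packet payload size: " + totalLen);
    }
    return totalLen;
  }
}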

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/resources/hdfs-default.xml

@@ -4458,6 +4458,14 @@
   </description>
 </property>
 
+<property>
+  <name>dfs.data.transfer.max.packet.size</name>
+  <value>16777216</value>
+  <description>
+    The max size of any single packet.
+  </description>
+</property>
+
 <property>
   <name>dfs.datanode.balance.max.concurrent.moves</name>
   <value>100</value>
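A minimal sketch of how the effective limit is resolved (assumptions: any override lives in an hdfs-site.xml visible on the classpath; the class and method here are illustrative, not part of the patch). Because PacketReceiver reads a fresh HdfsConfiguration in its static initializer, the value is fixed when the class loads; setting the key on some other Configuration instance afterwards has no effect on an already-initialized JVM.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

class EffectivePacketSize {
  public static void main(String[] args) {
    // Loads hdfs-default.xml plus any hdfs-site.xml on the classpath,
    // mirroring what PacketReceiver's static block does.
    Configuration conf = new HdfsConfiguration();
    int maxPacketSize = conf.getInt(
        HdfsClientConfigKeys.DFS_DATA_TRANSFER_MAX_PACKET_SIZE,
        HdfsClientConfigKeys.DFS_DATA_TRANSFER_MAX_PACKET_SIZE_DEFAULT);
    // Prints 16777216 (16 * 1024 * 1024 bytes, i.e. 16 MiB) unless overridden.
    System.out.println("dfs.data.transfer.max.packet.size = " + maxPacketSize);
  }
}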

+ 8 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/protocol/datatransfer/TestPacketReceiver.java

@@ -24,6 +24,7 @@ import java.io.IOException;
 import java.nio.ByteBuffer;
 
 import org.apache.hadoop.hdfs.AppendTestUtil;
+import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;
 import org.junit.Test;
 import org.mockito.Mockito;
 
@@ -56,7 +57,13 @@ public class TestPacketReceiver {
     buf.get(b);
     return b;
   }
-  
+
+  @Test
+  public void testPacketSize() {
+    assertEquals(PacketReceiver.MAX_PACKET_SIZE,
+            HdfsClientConfigKeys.DFS_DATA_TRANSFER_MAX_PACKET_SIZE_DEFAULT);
+  }
+
   @Test
   public void testReceiveAndMirror() throws IOException {
     PacketReceiver pr = new PacketReceiver(false);