
HADOOP-5464. DFSClient did not treat write timeout of 0 properly. Contributed by Raghu Angadi and Brandon Li

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1@1343992 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 13 years ago
parent
commit
4e218bac10
2 changed files with 8 additions and 3 deletions
  1. CHANGES.txt: +3 −0
  2. src/hdfs/org/apache/hadoop/hdfs/DFSClient.java: +5 −3

+ 3 - 0
CHANGES.txt

@@ -251,6 +251,9 @@ Release 1.1.0 - unreleased
 
     HDFS-3453. HDFS 1.x client is not interoperable with pre 1.x server.
     (Kihwal Lee via suresh)
+
+    HADOOP-5464. DFSClient did not treat write timeout of 0 properly.
+    (Raghu Angadi and Brandon Li via szetszwo)
     
 Release 1.0.3 - 2012.05.07
 

+ 5 - 3
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -3527,14 +3527,16 @@ public class DFSClient implements FSConstants, java.io.Closeable {
         final String dnName = nodes[0].getName(connectToDnViaHostname);
         InetSocketAddress target = NetUtils.createSocketAddr(dnName);
         s = socketFactory.createSocket();
-        timeoutValue = 3000 * nodes.length + socketTimeout;
+        timeoutValue = (socketTimeout > 0) ?
+            (3000 * nodes.length + socketTimeout) : 0;
         LOG.debug("Connecting to " + dnName);
         NetUtils.connect(s, target, getRandomLocalInterfaceAddr(), timeoutValue);
         s.setSoTimeout(timeoutValue);
         s.setSendBufferSize(DEFAULT_DATA_SOCKET_SIZE);
         LOG.debug("Send buf size " + s.getSendBufferSize());
-        long writeTimeout = HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length +
-                            datanodeWriteTimeout;
+        long writeTimeout = (datanodeWriteTimeout > 0) ?
+            (HdfsConstants.WRITE_TIMEOUT_EXTENSION * nodes.length +
+            datanodeWriteTimeout) : 0;
 
         //
         // Xmit header info to datanode
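
For context (not part of this commit), a minimal sketch of the corrected timeout arithmetic: in DFSClient a configured timeout of 0 means "no timeout", so the per-datanode extension must only be added when the base value is positive, otherwise 0 would silently become a finite timeout. The class and method names below are illustrative only; WRITE_TIMEOUT_EXTENSION is assumed to mirror HdfsConstants.WRITE_TIMEOUT_EXTENSION.

    // Illustrative sketch, not the actual DFSClient code.
    public class WriteTimeoutExample {
        // Assumed to mirror HdfsConstants.WRITE_TIMEOUT_EXTENSION (per-datanode
        // extension in milliseconds); the value here is only for illustration.
        static final long WRITE_TIMEOUT_EXTENSION = 5000;

        // Effective write timeout for a pipeline of numNodes datanodes.
        // A base timeout of 0 (wait indefinitely) is preserved as 0.
        static long effectiveWriteTimeout(long baseTimeout, int numNodes) {
            return (baseTimeout > 0)
                ? WRITE_TIMEOUT_EXTENSION * numNodes + baseTimeout
                : 0;
        }

        public static void main(String[] args) {
            // Before the fix a base timeout of 0 became 15000 ms for a
            // 3-node pipeline; after the fix it stays 0.
            System.out.println(effectiveWriteTimeout(0, 3));      // 0
            System.out.println(effectiveWriteTimeout(69000, 3));  // 84000
        }
    }

The same guard is applied above to the connect/read timeout derived from socketTimeout, so a value of 0 also keeps the socket connect and setSoTimeout calls unbounded.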