
Reverting the change r1164722

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.20-security@1164725 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 14 years ago
commit 7797edcff0

+ 0 - 3
CHANGES.txt

@@ -63,9 +63,6 @@ Release 0.20.205.0 - unreleased
     HDFS-988. Fix bug where savenameSpace can corrupt edits log.
     (Nicolas Spiegelberg via dhruba)

-    HDFS-1054. remove sleep before retry for allocating a block.
-    (Todd Lipcon via dhruba)
-
   IMPROVEMENTS

     MAPREDUCE-2187. Reporter sends progress during sort/merge. (Anupam Seth via

+ 8 - 1
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -3054,12 +3054,19 @@ public class DFSClient implements FSConstants, java.io.Closeable {
           namenode.abandonBlock(block, src, clientName);

           if (errorIndex < nodes.length) {
-            LOG.info("Excluding datanode " + nodes[errorIndex]);
+            LOG.debug("Excluding datanode " + nodes[errorIndex]);
             excludedNodes.add(nodes[errorIndex]);
           }

           // Connection failed.  Let's wait a little bit and retry
           retry = true;
+          try {
+            if (System.currentTimeMillis() - startTime > 5000) {
+              LOG.info("Waiting to find target node: " + nodes[0].getName());
+            }
+            Thread.sleep(6000);
+          } catch (InterruptedException iex) {
+          }
         }
       } while (retry && --count >= 0);

+ 0 - 55
src/test/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java

@@ -1,55 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.hdfs;
-
-import java.io.IOException;
-import java.io.OutputStream;
-
-import junit.*;
-import static junit.framework.Assert.fail;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
-
-/**
- * These tests make sure that DFSClient retries fetching data from DFS
- * properly in case of errors.
- */
-public class TestDFSClientExcludedNodes extends junit.framework.TestCase {
-
-  public void testExcludedNodes() throws IOException {
-    Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
-    FileSystem fs = cluster.getFileSystem();
-    Path filePath = new Path("/testExcludedNodes");
-
-    // kill a datanode
-    cluster.stopDataNode(AppendTestUtil.nextInt(3));
-    OutputStream out = fs.create(filePath, true, 4096);
-    out.write(20);
-
-    try {
-      out.close();
-    } catch (Exception e) {
-      fail("DataNode failure should not result in a block abort: \n" + e.getMessage());
-    }
-  }
-  
-}