
Merging r1035924 from trunk to federation. Both trunk and federation introduced a new file, TestDFSUtil.java; hence the two versions are merged.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1078191 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 14 years ago
parent commit fa9045272a

CHANGES.txt  +9 -6

@@ -295,12 +295,6 @@ Release 0.22.0 - Unreleased
     HDFS-895. Allow hflush/sync to occur in parallel with new writes
     to the file. (Todd Lipcon via hairong)
 
-    HDFS-1500. TestOfflineImageViewer failing on trunk. (Todd Lipcon
-    via hairong)
-
-    HDFS-1467. Append pipeline construction not succeeds with more than
-    one replica. (Todd Lipcon via hairong)
-
   IMPROVEMENTS
 
     HDFS-1304. Add a new unit test for HftpFileSystem.open(..).  (szetszwo)
@@ -454,6 +448,15 @@ Release 0.22.0 - Unreleased
     HDFS-718. Configuration parameter to prevent accidental formatting of 
     HDFS filesystem. (Andrew Ryan via jghoman)
 
+    HDFS-1500. TestOfflineImageViewer failing on trunk. (Todd Lipcon
+    via hairong)
+
+    HDFS-1467. Append pipeline construction not succeeds with more than
+    one replica. (Todd Lipcon via hairong)
+
+    HDFS-1483. DFSClient.getBlockLocations should indicate if corresponding
+    blocks are corrupt. (Patrick Kling via hairong)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

src/java/org/apache/hadoop/hdfs/DFSUtil.java  +2 -1

@@ -245,7 +245,8 @@ public class DFSUtil {
       }
       blkLocations[idx] = new BlockLocation(names, hosts, racks,
                                             blk.getStartOffset(),
-                                            blk.getBlockSize());
+                                            blk.getBlockSize(),
+                                            blk.isCorrupt());
       idx++;
     }
     return blkLocations;
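
For context, here is a minimal sketch (not part of this commit) of how a client could read the corrupt flag that locatedBlocks2Locations now propagates into each BlockLocation. The file path is a placeholder and a running HDFS instance is assumed; the FileSystem and BlockLocation calls are the standard public Hadoop APIs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CorruptBlockReport {
  public static void main(String[] args) throws Exception {
    // Hypothetical path; any HDFS file works.
    Path file = new Path("/user/example/data.txt");
    FileSystem fs = FileSystem.get(new Configuration());
    FileStatus stat = fs.getFileStatus(file);
    // Each BlockLocation now carries the corrupt flag filled in by
    // DFSUtil.locatedBlocks2Locations (HDFS-1483).
    BlockLocation[] locs = fs.getFileBlockLocations(stat, 0, stat.getLen());
    for (BlockLocation loc : locs) {
      System.out.println("offset=" + loc.getOffset()
          + " length=" + loc.getLength()
          + " corrupt=" + loc.isCorrupt());
    }
    fs.close();
  }
}

Before this change, the conversion used the BlockLocation constructor without the corrupt argument, so isCorrupt() defaulted to false for every converted block.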

src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUtil.java  +45 -3

@@ -20,21 +20,24 @@ package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
 import java.net.InetSocketAddress;
-import java.net.URI;
 import java.util.Collection;
 import java.util.Iterator;
 import java.util.List;
+import java.util.Arrays;
 
 import junit.framework.Assert;
 
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetUtils;
 
 import static org.junit.Assert.*;
 import org.junit.Test;
 
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.fs.BlockLocation;
 
 public class TestDFSUtil {
   
@@ -193,4 +196,43 @@ public class TestDFSUtil {
     }
   }
 
+  /**
+   * Test conversion of LocatedBlock to BlockLocation
+   */
+  @Test
+  public void testLocatedBlocks2Locations() {
+    DatanodeInfo d = new DatanodeInfo();
+    DatanodeInfo[] ds = new DatanodeInfo[1];
+    ds[0] = d;
+
+    // ok
+    ExtendedBlock b1 = new ExtendedBlock("bpid", 1, 1, 1);
+    LocatedBlock l1 = new LocatedBlock(b1, ds, 0, false);
+
+    // corrupt
+    ExtendedBlock b2 = new ExtendedBlock("bpid", 2, 1, 1);
+    LocatedBlock l2 = new LocatedBlock(b2, ds, 0, true);
+
+    List<LocatedBlock> ls = Arrays.asList(l1, l2);
+    LocatedBlocks lbs = new LocatedBlocks(10, false, ls, l2, true);
+
+    BlockLocation[] bs = DFSUtil.locatedBlocks2Locations(lbs);
+
+    assertTrue("expected 2 blocks but got " + bs.length,
+               bs.length == 2);
+
+    int corruptCount = 0;
+    for (BlockLocation b: bs) {
+      if (b.isCorrupt()) {
+        corruptCount++;
+      }
+    }
+
+    assertTrue("expected 1 corrupt files but got " + corruptCount, 
+               corruptCount == 1);
+    
+    // test an empty location
+    bs = DFSUtil.locatedBlocks2Locations(new LocatedBlocks());
+    assertEquals(0, bs.length);
+  }
 }