Browse Source

HADOOP-226. Fix fsck to properly handle replication counts, now that these can vary per file.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@428854 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 năm trước
mục cha
commit
abf504b511
2 tập tin đã thay đổi với 6 bổ sung và 2 xóa
  1. 3 0
      CHANGES.txt
  2. 3 2
      src/java/org/apache/hadoop/dfs/DFSck.java

+ 3 - 0
CHANGES.txt

@@ -139,6 +139,9 @@ Trunk (unreleased changes)
     Also, move JobConf.newInstance() to a new utility class.
     (Hairong Kuang via cutting)
 
+40. HADOOP-226.  Fix fsck command to properly consider replication
+    counts, now that these can vary per file.  (Bryan Pendleton via cutting)
+
 
 Release 0.4.0 - 2006-06-28
 

+ 3 - 2
src/java/org/apache/hadoop/dfs/DFSck.java

@@ -151,8 +151,9 @@ public class DFSck extends ToolBase {
       Block block = blocks[i].getBlock();
       long id = block.getBlockId();
       DatanodeInfo[] locs = blocks[i].getLocations();
-      if (locs.length > res.replication) res.overReplicatedBlocks += (locs.length - res.replication);
-      if (locs.length < res.replication && locs.length > 0) res.underReplicatedBlocks += (res.replication - locs.length);
+      short targetFileReplication = file.getReplication();
+      if (locs.length > targetFileReplication) res.overReplicatedBlocks += (locs.length - targetFileReplication);
+      if (locs.length < targetFileReplication && locs.length > 0) res.underReplicatedBlocks += (targetFileReplication - locs.length);
       report.append(i + ". " + id + " len=" + block.getNumBytes());
       if (locs == null || locs.length == 0) {
         report.append(" MISSING!");