
HDFS-12832. INode.getFullPathName may throw ArrayIndexOutOfBoundsException, leading to NameNode exit. Contributed by Konstantin Shvachko.

Konstantin V Shvachko committed 7 years ago
commit d331762f24
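
The crash path: BlockReconstructionWork used to hold onto the live BlockCollection and call getName() on it while choosing replication targets, a step that runs without the global lock (see the "NOT HOLDING THE GLOBAL LOCK" comment in BlockManager below). INode.getFullPathName() walks the inode's parent chain, so a concurrent delete or rename can change the path depth mid-walk and throw ArrayIndexOutOfBoundsException, terminating the NameNode. The fix snapshots the full path and storage policy ID while the lock is still held. A minimal sketch of that pattern follows; the class and interface names are hypothetical stand-ins, not actual HDFS code.

    // Hypothetical stand-in for the mutable inode, for illustration only.
    interface MutableFile {
      String getFullPathName();
      byte getStoragePolicyId();
    }

    final class ReconstructionTask {
      // Immutable snapshots taken while the global lock is held.
      private final String srcPath;
      private final byte storagePolicyId;

      ReconstructionTask(MutableFile file) {
        // Constructor runs under the lock, so both reads see a consistent tree.
        this.srcPath = file.getFullPathName();
        this.storagePolicyId = file.getStoragePolicyId();
      }

      void chooseTargets() {
        // Runs WITHOUT the global lock. Only the immutable snapshots are read,
        // so a concurrent rename or delete can no longer send this step down
        // a changing parent chain and into ArrayIndexOutOfBoundsException.
        System.out.println("placing replicas for " + srcPath
            + " with storage policy " + storagePolicyId);
      }
    }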

+ 0 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -1825,8 +1825,6 @@ public class BlockManager implements BlockStatsMXBean {
       }
 
       // choose replication targets: NOT HOLDING THE GLOBAL LOCK
-      // It is costly to extract the filename for which chooseTargets is called,
-      // so for now we pass in the block collection itself.
       final BlockPlacementPolicy placementPolicy =
           placementPolicies.getPolicy(rw.getBlock().getBlockType());
       rw.chooseTargets(placementPolicy, storagePolicySuite, excludedNodes);
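
The deleted comment is now stale: the filename is extracted exactly once, in the BlockReconstructionWork constructor below, while the lock is still held, so there is no costly per-target path lookup left to defer.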

+ 10 - 4
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockReconstructionWork.java

@@ -32,7 +32,8 @@ import java.util.Set;
 abstract class BlockReconstructionWork {
   private final BlockInfo block;
 
-  private final BlockCollection bc;
+  private final String srcPath;
+  private final byte storagePolicyID;
 
   /**
    * An erasure coding reconstruction task has multiple source nodes.
@@ -57,7 +58,8 @@ abstract class BlockReconstructionWork {
       int additionalReplRequired,
       int priority) {
     this.block = block;
-    this.bc = bc;
+    this.srcPath = bc.getName();
+    this.storagePolicyID = bc.getStoragePolicyID();
     this.srcNodes = srcNodes;
     this.containingNodes = containingNodes;
     this.liveReplicaStorages = liveReplicaStorages;
@@ -94,8 +96,12 @@ abstract class BlockReconstructionWork {
     return srcNodes;
   }
 
-  BlockCollection getBc() {
-    return bc;
+  public String getSrcPath() {
+    return srcPath;
+  }
+
+  public byte getStoragePolicyID() {
+    return storagePolicyID;
   }
 
   List<DatanodeStorageInfo> getLiveReplicaStorages() {

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ErasureCodingWork.java

@@ -58,10 +58,10 @@ class ErasureCodingWork extends BlockReconstructionWork {
       Set<Node> excludedNodes) {
     // TODO: new placement policy for EC considering multiple writers
     DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
-        getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
+        getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
         getLiveReplicaStorages(), false, excludedNodes,
         getBlock().getNumBytes(),
-        storagePolicySuite.getPolicy(getBc().getStoragePolicyID()), null);
+        storagePolicySuite.getPolicy(getStoragePolicyID()), null);
     setTargets(chosenTargets);
   }
 

+ 2 - 2
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/ReplicationWork.java

@@ -44,10 +44,10 @@ class ReplicationWork extends BlockReconstructionWork {
         : "At least 1 source node should have been selected";
     try {
       DatanodeStorageInfo[] chosenTargets = blockplacement.chooseTarget(
-          getBc().getName(), getAdditionalReplRequired(), getSrcNodes()[0],
+          getSrcPath(), getAdditionalReplRequired(), getSrcNodes()[0],
           getLiveReplicaStorages(), false, excludedNodes,
           getBlock().getNumBytes(),
-          storagePolicySuite.getPolicy(getBc().getStoragePolicyID()),
+          storagePolicySuite.getPolicy(getStoragePolicyID()),
           null);
       setTargets(chosenTargets);
     } finally {