Browse source

HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to BlockPlacementPolicyRackFaultTolerant. (wang)

(cherry picked from commit c1d50a91f7c05e4aaf4655380c8dcd11703ff158)
Andrew Wang, 10 years ago
parent
commit
a80a68c298

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -254,6 +254,9 @@ Release 2.8.0 - UNRELEASED
     HDFS-8386. Improve synchronization of 'streamer' reference in
     DFSOutputStream. (Rakesh R via wang)
 
+    HDFS-8513. Rename BlockPlacementPolicyRackFaultTolarent to
+    BlockPlacementPolicyRackFaultTolerant. (wang)
+
   OPTIMIZATIONS
 
     HDFS-8026. Trace FSOutputSummer#writeChecksumChunks rather than

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolarent.java → hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockPlacementPolicyRackFaultTolerant.java

@@ -30,7 +30,7 @@ import java.util.*;
  * The strategy is that it tries its best to place the replicas to most racks.
  */
 @InterfaceAudience.Private
-public class BlockPlacementPolicyRackFaultTolarent extends BlockPlacementPolicyDefault {
+public class BlockPlacementPolicyRackFaultTolerant extends BlockPlacementPolicyDefault {
 
   @Override
   protected int[] getMaxNodesPerRack(int numOfChosen, int numOfReplicas) {

+ 3 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolarent.java → hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestBlockPlacementPolicyRackFaultTolerant.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy;
-import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolarent;
+import org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicyRackFaultTolerant;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.net.StaticMapping;
 import org.junit.After;
@@ -42,7 +42,7 @@ import java.util.*;
 import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertTrue;
 
-public class TestBlockPlacementPolicyRackFaultTolarent {
+public class TestBlockPlacementPolicyRackFaultTolerant {
 
   private static final int DEFAULT_BLOCK_SIZE = 1024;
   private MiniDFSCluster cluster = null;
@@ -63,7 +63,7 @@ public class TestBlockPlacementPolicyRackFaultTolarent {
       }
     }
     conf.setClass(DFSConfigKeys.DFS_BLOCK_REPLICATOR_CLASSNAME_KEY,
-        BlockPlacementPolicyRackFaultTolarent.class,
+        BlockPlacementPolicyRackFaultTolerant.class,
         BlockPlacementPolicy.class);
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE / 2);