
HDFS-4377. Some trivial DN comment cleanup. Contributed by Eli Collins

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1431754 13f79535-47bb-0310-9956-ffa450edef68
Eli Collins 12 years ago
parent
commit
71163ca1ed

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -186,6 +186,8 @@ Release 2.0.3-alpha - Unreleased
     HDFS-4363. Combine PBHelper and HdfsProtoUtil and remove redundant
     methods. (suresh)
 
+    HDFS-4377. Some trivial DN comment cleanup. (eli)
+
   OPTIMIZATIONS
 
   BUG FIXES 

+ 9 - 10
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -171,20 +171,19 @@ public class BlockManager {
    */
   private final Set<Block> postponedMisreplicatedBlocks = Sets.newHashSet();
 
-  //
-  // Keeps a TreeSet for every named node. Each treeset contains
-  // a list of the blocks that are "extra" at that location. We'll
-  // eventually remove these extras.
-  // Mapping: StorageID -> TreeSet<Block>
-  //
+  /**
+   * Maps a StorageID to the set of blocks that are "extra" for this
+   * DataNode. We'll eventually remove these extras.
+   */
   public final Map<String, LightWeightLinkedSet<Block>> excessReplicateMap =
     new TreeMap<String, LightWeightLinkedSet<Block>>();
 
-  //
-  // Store set of Blocks that need to be replicated 1 or more times.
-  // We also store pending replication-orders.
-  //
+  /**
+   * Store set of Blocks that need to be replicated 1 or more times.
+   * We also store pending replication-orders.
+   */
   public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+
   @VisibleForTesting
   final PendingReplicationBlocks pendingReplications;
 
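For context on the new Javadoc above: excessReplicateMap is a per-storage multimap, an ordered map from storage ID to the set of blocks slated for eventual removal on that DataNode. Below is a minimal sketch of the same pattern, not BlockManager's actual code: java.util.LinkedHashSet stands in for Hadoop's LightWeightLinkedSet, Long stands in for Block, and the addExcess/removeExcess helper names are hypothetical.

import java.util.LinkedHashSet;
import java.util.Map;
import java.util.Set;
import java.util.TreeMap;

public class ExcessReplicaSketch {
  // StorageID -> blocks that are "extra" on that DataNode.
  private final Map<String, Set<Long>> excessReplicateMap =
      new TreeMap<String, Set<Long>>();

  // Record a block as excess for the given storage, creating the
  // per-storage set on first use.
  void addExcess(String storageId, long blockId) {
    Set<Long> excess = excessReplicateMap.get(storageId);
    if (excess == null) {
      excess = new LinkedHashSet<Long>();
      excessReplicateMap.put(storageId, excess);
    }
    excess.add(blockId);
  }

  // Returns true if the block was tracked as excess for this storage.
  boolean removeExcess(String storageId, long blockId) {
    Set<Long> excess = excessReplicateMap.get(storageId);
    return excess != null && excess.remove(blockId);
  }
}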

+ 14 - 16
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -970,29 +970,27 @@ public class DataNode extends Configured
     dnId.setStorageID(createNewStorageId(dnId.getXferPort()));
   }
   
+  /**
+   * @return a unique storage ID of the form "DS-randInt-ipaddr-port-timestamp"
+   */
   static String createNewStorageId(int port) {
-    /* Return 
-     * "DS-randInt-ipaddr-currentTimeMillis"
-     * It is considered extermely rare for all these numbers to match
-     * on a different machine accidentally for the following 
-     * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
-     * b) Good chance ip address would be different, and
-     * c) Even on the same machine, Datanode is designed to use different ports.
-     * d) Good chance that these are started at different times.
-     * For a confict to occur all the 4 above have to match!.
-     * The format of this string can be changed anytime in future without
-     * affecting its functionality.
-     */
+    // It is unlikely that we will create a non-unique storage ID
+    // for the following reasons:
+    // a) SecureRandom is a cryptographically strong random number generator
+    // b) IP addresses will likely differ on different hosts
+    // c) DataNode xfer ports will differ on the same host
+    // d) StorageIDs will likely be generated at different times (in ms)
+    // A conflict requires that all four conditions are violated.
+    // NB: The format of this string can be changed in the future without
+    // requiring that old StorageIDs be updated.
     String ip = "unknownIP";
     try {
       ip = DNS.getDefaultIP("default");
     } catch (UnknownHostException ignored) {
-      LOG.warn("Could not find ip address of \"default\" inteface.");
+      LOG.warn("Could not find an IP address for the \"default\" interface.");
     }
-    
     int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
-    return "DS-" + rand + "-" + ip + "-" + port + "-"
-        + Time.now();
+    return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now();
   }
   
   /** Ensure the authentication method is kerberos */

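The uniqueness argument in the rewritten comment is easy to check with a standalone sketch. This is a hedged approximation, not the DataNode code itself: it substitutes plain JDK calls (InetAddress, SecureRandom, System.currentTimeMillis) for Hadoop's DNS.getDefaultIP("default"), DFSUtil.getSecureRandom(), and Time.now(), and the class and method names are hypothetical.

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.security.SecureRandom;

public class StorageIdSketch {
  // Mirrors the "DS-randInt-ipaddr-port-timestamp" format described above.
  static String newStorageId(int port) {
    String ip = "unknownIP";
    try {
      ip = InetAddress.getLocalHost().getHostAddress();
    } catch (UnknownHostException ignored) {
      // Keep the placeholder, as DataNode does when the lookup fails.
    }
    int rand = new SecureRandom().nextInt(Integer.MAX_VALUE);
    return "DS-" + rand + "-" + ip + "-" + port + "-" + System.currentTimeMillis();
  }

  public static void main(String[] args) {
    // Prints something like: DS-1408290113-192.168.1.5-50010-1357754400000
    System.out.println(newStorageId(50010));
  }
}

A collision requires the random int, IP address, port, and millisecond timestamp to all match at once, which is the four-way independence the comment relies on.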
+ 7 - 7
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -62,7 +62,7 @@ import org.apache.hadoop.util.DiskChecker;
  */
 @InterfaceAudience.Private
 public class DataStorage extends Storage {
-  // Constants
+
   public final static String BLOCK_SUBDIR_PREFIX = "subdir";
   final static String BLOCK_FILE_PREFIX = "blk_";
   final static String COPY_FILE_PREFIX = "dncp_";
@@ -71,13 +71,13 @@ public class DataStorage extends Storage {
   public final static String STORAGE_DIR_FINALIZED = "finalized";
   public final static String STORAGE_DIR_TMP = "tmp";
 
-  /** Access to this variable is guarded by "this" */
+  /** Unique storage ID. See {@link DataNode#createNewStorageId(int)} for details. */
   private String storageID;
 
-  // flag to ensure initialzing storage occurs only once
-  private boolean initilized = false;
+  // Flag to ensure we only initialize storage once
+  private boolean initialized = false;
   
-  // BlockPoolStorage is map of <Block pool Id, BlockPoolStorage>
+  // Maps block pool IDs to block pool storage
   private Map<String, BlockPoolSliceStorage> bpStorageMap
       = Collections.synchronizedMap(new HashMap<String, BlockPoolSliceStorage>());
 
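A side note on the bpStorageMap declaration above: Collections.synchronizedMap makes individual get/put calls thread-safe, but compound check-then-act sequences still need an explicit lock on the map itself. A hedged illustration of that distinction, with BlockPoolSliceStorage stubbed out as String and hypothetical class and method names:

import java.util.Collections;
import java.util.HashMap;
import java.util.Map;

public class BpStorageMapSketch {
  // Block pool ID -> per-pool storage handle (stubbed as String here).
  private final Map<String, String> bpStorageMap =
      Collections.synchronizedMap(new HashMap<String, String>());

  void addBlockPool(String bpid, String storage) {
    bpStorageMap.put(bpid, storage); // single ops need no extra locking
  }

  String getBlockPoolStorage(String bpid) {
    return bpStorageMap.get(bpid);
  }

  // Compound operations must synchronize on the wrapper map.
  String putIfAbsentCompat(String bpid, String storage) {
    synchronized (bpStorageMap) {
      String existing = bpStorageMap.get(bpid);
      if (existing == null) {
        bpStorageMap.put(bpid, storage);
      }
      return existing;
    }
  }
}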
@@ -130,7 +130,7 @@ public class DataStorage extends Storage {
   synchronized void recoverTransitionRead(DataNode datanode,
       NamespaceInfo nsInfo, Collection<File> dataDirs, StartupOption startOpt)
       throws IOException {
-    if (initilized) {
+    if (initialized) {
       // DN storage has been initialized, no need to do anything
       return;
     }
@@ -200,7 +200,7 @@ public class DataStorage extends Storage {
     this.writeAll();
     
    // 4. mark DN storage as initialized
-    this.initilized = true;
+    this.initialized = true;
   }
 
   /**