
Merge trunk into HDFS-1073

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1148533 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 14 years ago
parent
commit
586de2790e
52 changed files with 519 additions and 457 deletions
  1. +28 -0    hdfs/CHANGES.txt
  2. +5 -109   hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java
  3. +4 -4     hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java
  4. +113 -3   hdfs/src/java/org/apache/hadoop/hdfs/DFSUtil.java
  5. +11 -2    hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java
  6. +0 -3     hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java
  7. +1 -1     hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java
  8. +24 -0    hdfs/src/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java
  9. +1 -2     hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java
  10. +2 -2    hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java
  11. +65 -32  hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java
  12. +1 -1    hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java
  13. +30 -1   hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java
  14. +3 -4    hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java
  15. +3 -4    hdfs/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java
  16. +1 -4    hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java
  17. +2 -3    hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java
  18. +2 -5    hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
  19. +2 -4    hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java
  20. +9 -11   hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java
  21. +4 -4    hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java
  22. +1 -1    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java
  23. +10 -10  hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java
  24. +3 -3    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java
  25. +1 -1    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java
  26. +19 -61  hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
  27. +3 -3    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  28. +1 -1    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java
  29. +2 -1    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
  30. +9 -15   hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java
  31. +20 -22  hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  32. +2 -2    hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java
  33. +15 -1   hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java
  34. +8 -12   hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java
  35. +5 -2    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java
  36. +0 -10   hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java
  37. +1 -1    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
  38. +20 -23  hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
  39. +1 -1    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
  40. +3 -9    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
  41. +22 -10  hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java
  42. +16 -16  hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java
  43. +5 -5    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  44. +3 -4    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java
  45. +4 -5    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java
  46. +5 -3    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java
  47. +2 -2    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java
  48. +17 -25  hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java
  49. +2 -1    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java
  50. +2 -2    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java
  51. +5 -5    hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java
  52. +1 -1    hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java

+ 28 - 0
hdfs/CHANGES.txt

@@ -317,6 +317,9 @@ Trunk (unreleased changes)
 
     HDFS-1547. Improve decommission mechanism. (suresh)
 
+    HDFS-2143. Federation: In cluster web console, add link to namenode page
+    that displays live and dead datanodes. (Ravi Prakash via suresh)
+
     HDFS-1588. Remove hardcoded strings for configuration keys, "dfs.hosts"
     and "dfs.hosts.exlude". (Erik Steffl via suresh)
 
@@ -554,6 +557,28 @@ Trunk (unreleased changes)
 
     HDFS-2140. Move Host2NodesMap to the blockmanagement package.  (szetszwo)
 
+    HDFS-2154. In TestDFSShell, use TEST_ROOT_DIR and fix some deprecated
+    warnings.  (szetszwo)
+
+    HDFS-2153. Move DFSClientAdapter to test and fix some javac warnings in
+    OfflineEditsViewerHelper.  (szetszwo)
+
+    HDFS-2159. Deprecate DistributedFileSystem.getClient() and fixed the
+    deprecated warnings in DFSAdmin.  (szetszwo)
+
+    HDFS-2157. Improve header comment in o.a.h.hdfs.server.namenode.NameNode.
+    (atm via eli)
+
+    HDFS-2147. Move cluster network topology to block management and fix some
+    javac warnings.  (szetszwo)
+
+    HDFS-2141. Remove NameNode roles Active and Standby (they become
+    states of the namenode). (suresh)
+
+    HDFS-2161. Move createNamenode(..), createClientDatanodeProtocolProxy(..)
+    and Random object creation to DFSUtil; move DFSClient.stringifyToken(..)
+    to DelegationTokenIdentifier.  (szetszwo)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -831,6 +856,9 @@ Trunk (unreleased changes)
     HDFS-2120. on reconnect, DN can connect to NN even with different source
     versions. (John George via atm)
 
+    HDFS-2152. TestWriteConfigurationToDFS causing the random failures. (Uma
+    Maheswara Rao G via atm)
+
 Release 0.22.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 5 - 109
hdfs/src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -19,7 +19,6 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.BufferedOutputStream;
-import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
@@ -31,8 +30,6 @@ import java.util.EnumSet;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
-import java.util.concurrent.TimeUnit;
 
 import javax.net.SocketFactory;
 
@@ -56,12 +53,9 @@ import org.apache.hadoop.fs.ParentNotDirectoryException;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.UnresolvedLinkException;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.CorruptFileBlocks;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -89,9 +83,6 @@ import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.io.Text;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
 import org.apache.hadoop.ipc.Client;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -124,7 +115,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
   volatile boolean clientRunning = true;
   private volatile FsServerDefaults serverDefaults;
   private volatile long serverDefaultsLastUpdate;
-  static Random r = new Random();
   final String clientName;
   Configuration conf;
   SocketFactory socketFactory;
@@ -216,79 +206,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
    */
   private final Map<String, DFSOutputStream> filesBeingWritten
       = new HashMap<String, DFSOutputStream>();
-
-  /** Create a {@link NameNode} proxy */
-  public static ClientProtocol createNamenode(Configuration conf) throws IOException {
-    return createNamenode(NameNode.getAddress(conf), conf);
-  }
-
-  public static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
-      Configuration conf) throws IOException {
-    return createNamenode(createRPCNamenode(nameNodeAddr, conf,
-        UserGroupInformation.getCurrentUser()));
-    
-  }
-
-  private static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr,
-      Configuration conf, UserGroupInformation ugi) 
-    throws IOException {
-    return (ClientProtocol)RPC.getProxy(ClientProtocol.class,
-        ClientProtocol.versionID, nameNodeAddr, ugi, conf,
-        NetUtils.getSocketFactory(conf, ClientProtocol.class));
-  }
-
-  private static ClientProtocol createNamenode(ClientProtocol rpcNamenode)
-    throws IOException {
-    RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
-        5, LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
-    
-    Map<Class<? extends Exception>,RetryPolicy> remoteExceptionToPolicyMap =
-      new HashMap<Class<? extends Exception>, RetryPolicy>();
-    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class, createPolicy);
-
-    Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
-      new HashMap<Class<? extends Exception>, RetryPolicy>();
-    exceptionToPolicyMap.put(RemoteException.class, 
-        RetryPolicies.retryByRemoteException(
-            RetryPolicies.TRY_ONCE_THEN_FAIL, remoteExceptionToPolicyMap));
-    RetryPolicy methodPolicy = RetryPolicies.retryByException(
-        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
-    Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
-    
-    methodNameToPolicyMap.put("create", methodPolicy);
-
-    return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
-        rpcNamenode, methodNameToPolicyMap);
-  }
-
-  static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      LocatedBlock locatedBlock)
-      throws IOException {
-    InetSocketAddress addr = NetUtils.createSocketAddr(
-      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
-    if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
-      ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
-    }
-    
-    // Since we're creating a new UserGroupInformation here, we know that no
-    // future RPC proxies will be able to re-use the same connection. And
-    // usages of this proxy tend to be one-off calls.
-    //
-    // This is a temporary fix: callers should really achieve this by using
-    // RPC.stopProxy() on the resulting object, but this is currently not
-    // working in trunk. See the discussion on HDFS-1965.
-    Configuration confWithNoIpcIdle = new Configuration(conf);
-    confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
-        .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
-
-    UserGroupInformation ticket = UserGroupInformation
-        .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
-    ticket.addToken(locatedBlock.getBlockToken());
-    return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
-        ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
-        NetUtils.getDefaultSocketFactory(conf), socketTimeout);
-  }
         
   /**
    * Same as this(NameNode.getAddress(conf), conf);
@@ -342,8 +259,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     this.clientName = leaserenewer.getClientName(dfsClientConf.taskId);
     this.socketCache = new SocketCache(dfsClientConf.socketCacheCapacity);
     if (nameNodeAddr != null && rpcNamenode == null) {
-      this.rpcNamenode = createRPCNamenode(nameNodeAddr, conf, ugi);
-      this.namenode = createNamenode(this.rpcNamenode);
+      this.rpcNamenode = DFSUtil.createRPCNamenode(nameNodeAddr, conf, ugi);
+      this.namenode = DFSUtil.createNamenode(this.rpcNamenode);
     } else if (nameNodeAddr == null && rpcNamenode != null) {
       //This case is used for testing.
       this.namenode = this.rpcNamenode = rpcNamenode;
@@ -505,27 +422,6 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     }
     return serverDefaults;
   }
-
-  /**
-   *  A test method for printing out tokens 
-   *  @param token
-   *  @return Stringify version of the token
-   */
-  public static String stringifyToken(Token<DelegationTokenIdentifier> token)
-  throws IOException {
-    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
-    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
-    DataInputStream in = new DataInputStream(buf);  
-    ident.readFields(in);
-    String str = ident.getKind() + " token " + ident.getSequenceNumber() + 
-    " for " + ident.getUser().getShortUserName();
-    if (token.getService().getLength() > 0) {
-      return (str + " on " + token.getService());
-    } else {
-      return str;
-    }
-  }
-
   
   /**
    * @see ClientProtocol#getDelegationToken(Text)
@@ -534,7 +430,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
       throws IOException {
     Token<DelegationTokenIdentifier> result =
       namenode.getDelegationToken(renewer);
-    LOG.info("Created " + stringifyToken(result));
+    LOG.info("Created " + DelegationTokenIdentifier.stringifyToken(result));
     return result;
   }
 
@@ -543,7 +439,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
    */
   public long renewDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
-    LOG.info("Renewing " + stringifyToken(token));
+    LOG.info("Renewing " + DelegationTokenIdentifier.stringifyToken(token));
     try {
       return namenode.renewDelegationToken(token);
     } catch (RemoteException re) {
@@ -557,7 +453,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
    */
   public void cancelDelegationToken(Token<DelegationTokenIdentifier> token)
       throws InvalidToken, IOException {
-    LOG.info("Cancelling " + stringifyToken(token));
+    LOG.info("Cancelling " + DelegationTokenIdentifier.stringifyToken(token));
     try {
       namenode.cancelDelegationToken(token);
     } catch (RemoteException re) {

+ 4 - 4
hdfs/src/java/org/apache/hadoop/hdfs/DFSInputStream.java

@@ -35,13 +35,13 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.fs.ChecksumException;
 import org.apache.hadoop.fs.FSInputStream;
 import org.apache.hadoop.fs.UnresolvedLinkException;
-import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.datanode.ReplicaNotFoundException;
 import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.RemoteException;
@@ -157,7 +157,7 @@ public class DFSInputStream extends FSInputStream {
       ClientDatanodeProtocol cdp = null;
       
       try {
-        cdp = DFSClient.createClientDatanodeProtocolProxy(
+        cdp = DFSUtil.createClientDatanodeProtocolProxy(
         datanode, dfsClient.conf, dfsClient.getConf().socketTimeout, locatedblock);
         
         final long n = cdp.getReplicaVisibleLength(locatedblock.getBlock());
@@ -625,7 +625,7 @@ public class DFSInputStream extends FSInputStream {
           // will wait 6000ms grace period before retry and the waiting window is
           // expanded to 9000ms. 
           double waitTime = timeWindow * failures +       // grace period for the last round of attempt
-            timeWindow * (failures + 1) * dfsClient.r.nextDouble(); // expanding time window for each failure
+            timeWindow * (failures + 1) * DFSUtil.getRandom().nextDouble(); // expanding time window for each failure
           DFSClient.LOG.warn("DFS chooseDataNode: got # " + (failures + 1) + " IOException, will wait for " + waitTime + " msec.");
           Thread.sleep((long)waitTime);
         } catch (InterruptedException iex) {

+ 113 - 3
hdfs/src/java/org/apache/hadoop/hdfs/DFSUtil.java

@@ -18,31 +18,63 @@
 
 package org.apache.hadoop.hdfs;
 
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICES;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_FEDERATION_NAMESERVICE_ID;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_BACKUP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_RPC_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SECONDARY_HTTP_ADDRESS_KEY;
+import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY;
+
 import java.io.IOException;
 import java.io.UnsupportedEncodingException;
 import java.net.InetSocketAddress;
 import java.util.ArrayList;
 import java.util.Collection;
-import java.util.List;
 import java.util.Comparator;
+import java.util.HashMap;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
 import java.util.StringTokenizer;
+import java.util.concurrent.TimeUnit;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.hadoop.hdfs.DFSConfigKeys.*;
 
 @InterfaceAudience.Private
 public class DFSUtil {
-  
+  private static final ThreadLocal<Random> RANDOM = new ThreadLocal<Random>() {
+    @Override
+    protected Random initialValue() {
+      return new Random();
+    }
+  };
+
+  /** @return a pseudorandom number generator. */
+  public static Random getRandom() {
+    return RANDOM.get();
+  }
+
   /**
    * Compartor for sorting DataNodeInfo[] based on decommissioned states.
    * Decommissioned nodes are moved to the end of the array on sorting with
@@ -586,4 +618,82 @@ public class DFSUtil {
   public static int roundBytesToGB(long bytes) {
     return Math.round((float)bytes/ 1024 / 1024 / 1024);
   }
+
+
+  /** Create a {@link NameNode} proxy */
+  public static ClientProtocol createNamenode(Configuration conf) throws IOException {
+    return createNamenode(NameNode.getAddress(conf), conf);
+  }
+
+  /** Create a {@link NameNode} proxy */
+  public static ClientProtocol createNamenode( InetSocketAddress nameNodeAddr,
+      Configuration conf) throws IOException {
+    return createNamenode(createRPCNamenode(nameNodeAddr, conf,
+        UserGroupInformation.getCurrentUser()));
+    
+  }
+
+  /** Create a {@link NameNode} proxy */
+  static ClientProtocol createRPCNamenode(InetSocketAddress nameNodeAddr,
+      Configuration conf, UserGroupInformation ugi) 
+    throws IOException {
+    return (ClientProtocol)RPC.getProxy(ClientProtocol.class,
+        ClientProtocol.versionID, nameNodeAddr, ugi, conf,
+        NetUtils.getSocketFactory(conf, ClientProtocol.class));
+  }
+
+  /** Create a {@link NameNode} proxy */
+  static ClientProtocol createNamenode(ClientProtocol rpcNamenode)
+    throws IOException {
+    RetryPolicy createPolicy = RetryPolicies.retryUpToMaximumCountWithFixedSleep(
+        5, FSConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);
+    
+    Map<Class<? extends Exception>,RetryPolicy> remoteExceptionToPolicyMap =
+      new HashMap<Class<? extends Exception>, RetryPolicy>();
+    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class, createPolicy);
+
+    Map<Class<? extends Exception>,RetryPolicy> exceptionToPolicyMap =
+      new HashMap<Class<? extends Exception>, RetryPolicy>();
+    exceptionToPolicyMap.put(RemoteException.class, 
+        RetryPolicies.retryByRemoteException(
+            RetryPolicies.TRY_ONCE_THEN_FAIL, remoteExceptionToPolicyMap));
+    RetryPolicy methodPolicy = RetryPolicies.retryByException(
+        RetryPolicies.TRY_ONCE_THEN_FAIL, exceptionToPolicyMap);
+    Map<String,RetryPolicy> methodNameToPolicyMap = new HashMap<String,RetryPolicy>();
+    
+    methodNameToPolicyMap.put("create", methodPolicy);
+
+    return (ClientProtocol) RetryProxy.create(ClientProtocol.class,
+        rpcNamenode, methodNameToPolicyMap);
+  }
+
+  /** Create a {@link ClientDatanodeProtocol} proxy */
+  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
+      DatanodeID datanodeid, Configuration conf, int socketTimeout,
+      LocatedBlock locatedBlock)
+      throws IOException {
+    InetSocketAddress addr = NetUtils.createSocketAddr(
+      datanodeid.getHost() + ":" + datanodeid.getIpcPort());
+    if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
+      ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
+    }
+    
+    // Since we're creating a new UserGroupInformation here, we know that no
+    // future RPC proxies will be able to re-use the same connection. And
+    // usages of this proxy tend to be one-off calls.
+    //
+    // This is a temporary fix: callers should really achieve this by using
+    // RPC.stopProxy() on the resulting object, but this is currently not
+    // working in trunk. See the discussion on HDFS-1965.
+    Configuration confWithNoIpcIdle = new Configuration(conf);
+    confWithNoIpcIdle.setInt(CommonConfigurationKeysPublic
+        .IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 0);
+
+    UserGroupInformation ticket = UserGroupInformation
+        .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
+    ticket.addToken(locatedBlock.getBlockToken());
+    return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
+        ClientDatanodeProtocol.versionID, addr, ticket, confWithNoIpcIdle,
+        NetUtils.getDefaultSocketFactory(conf), socketTimeout);
+  }
 }

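For context, a minimal caller-side sketch (not part of this commit) of the proxy helpers that HDFS-2161 relocates into DFSUtil above. The class name and the assumption of a reachable NameNode configured as the default filesystem are illustrative only:

// Illustrative sketch, assuming an HDFS NameNode is configured as the default filesystem.
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FsServerDefaults;
import org.apache.hadoop.hdfs.DFSUtil;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;

public class NamenodeProxySketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    // Formerly DFSClient.createNamenode(conf); the retrying ClientProtocol proxy now comes from DFSUtil.
    ClientProtocol namenode = DFSUtil.createNamenode(conf);
    FsServerDefaults defaults = namenode.getServerDefaults();
    System.out.println("default block size = " + defaults.getBlockSize());
    // The shared DFSClient.r field is gone; callers use the per-thread DFSUtil.getRandom() instead.
    System.out.println("jitter = " + DFSUtil.getRandom().nextInt(1000));
  }
}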
+ 11 - 2
hdfs/src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -532,6 +532,9 @@ public class DistributedFileSystem extends FileSystem {
     return "DFS[" + dfs + "]";
   }
 
+  /** @deprecated DFSClient should not be accessed directly. */
+  @InterfaceAudience.Private
+  @Deprecated
   public DFSClient getClient() {
     return dfs;
   }        
@@ -624,9 +627,15 @@ public class DistributedFileSystem extends FileSystem {
     return new CorruptFileBlockIterator(dfs, path);
   }
 
-  /** Return statistics for each datanode. */
+  /** @return datanode statistics. */
   public DatanodeInfo[] getDataNodeStats() throws IOException {
-    return dfs.datanodeReport(DatanodeReportType.ALL);
+    return getDataNodeStats(DatanodeReportType.ALL);
+  }
+
+  /** @return datanode statistics for the given type. */
+  public DatanodeInfo[] getDataNodeStats(final DatanodeReportType type
+      ) throws IOException {
+    return dfs.datanodeReport(type);
   }
 
   /**

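A hedged usage sketch for the new getDataNodeStats(DatanodeReportType) overload added above. The class name, cluster URI, and the DatanodeReportType import path (as found on this branch) are assumptions for illustration:

// Illustrative sketch: requesting only live datanodes via the new typed overload.
import java.io.IOException;
import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;

public class LiveDatanodeReportSketch {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    URI uri = URI.create("hdfs://namenode.example.com:8020/");  // placeholder address
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(uri, conf);
    try {
      // Old code always fetched DatanodeReportType.ALL; a specific report type can now be requested.
      for (DatanodeInfo dn : dfs.getDataNodeStats(DatanodeReportType.LIVE)) {
        System.out.println(dn.getName() + " capacity=" + dn.getCapacity());
      }
    } finally {
      dfs.close();
    }
  }
}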
+ 0 - 3
hdfs/src/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -31,7 +31,6 @@ import java.security.PrivilegedExceptionAction;
 import java.text.ParseException;
 import java.text.SimpleDateFormat;
 import java.util.ArrayList;
-import java.util.Random;
 import java.util.TimeZone;
 import java.util.concurrent.DelayQueue;
 import java.util.concurrent.Delayed;
@@ -49,7 +48,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
@@ -88,7 +86,6 @@ public class HftpFileSystem extends FileSystem {
   private URI hdfsURI;
   protected InetSocketAddress nnAddr;
   protected UserGroupInformation ugi; 
-  protected final Random ran = new Random();
 
   public static final String HFTP_TIMEZONE = "UTC";
   public static final String HFTP_DATE_FORMAT = "yyyy-MM-dd'T'HH:mm:ssZ";

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/LeaseRenewer.java

@@ -155,7 +155,7 @@ class LeaseRenewer {
     }
   }
 
-  private final String clienNamePostfix = DFSClient.r.nextInt()
+  private final String clienNamePostfix = DFSUtil.getRandom().nextInt()
       + "_" + Thread.currentThread().getId();
 
   /** The time in milliseconds that the map became empty. */

+ 24 - 0
hdfs/src/java/org/apache/hadoop/hdfs/security/token/delegation/DelegationTokenIdentifier.java

@@ -18,8 +18,13 @@
 
 package org.apache.hadoop.hdfs.security.token.delegation;
 
+import java.io.ByteArrayInputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.delegation.AbstractDelegationTokenIdentifier;
 
 /**
@@ -51,4 +56,23 @@ public class DelegationTokenIdentifier
     return HDFS_DELEGATION_KIND;
   }
 
+  @Override
+  public String toString() {
+    return getKind() + " token " + getSequenceNumber()
+        + " for " + getUser().getShortUserName();
+  }
+
+  /** @return a string representation of the token */
+  public static String stringifyToken(final Token<?> token) throws IOException {
+    DelegationTokenIdentifier ident = new DelegationTokenIdentifier();
+    ByteArrayInputStream buf = new ByteArrayInputStream(token.getIdentifier());
+    DataInputStream in = new DataInputStream(buf);  
+    ident.readFields(in);
+
+    if (token.getService().getLength() > 0) {
+      return ident + " on " + token.getService();
+    } else {
+      return ident.toString();
+    }
+  }
 }

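A small hedged sketch of the relocated stringifyToken(..) helper shown above; the wrapper class and method are illustrative, only the static call is from this patch:

// Illustrative sketch: rendering an HDFS delegation token after HDFS-2161.
import java.io.IOException;

import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.security.token.Token;

public class TokenDescribeSketch {
  /** Render a token the way DFSClient used to with its private stringifyToken(..). */
  public static String describe(Token<DelegationTokenIdentifier> token) throws IOException {
    // The helper is now a static on DelegationTokenIdentifier and accepts any Token<?>.
    return DelegationTokenIdentifier.stringifyToken(token);
  }
}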
+ 1 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/Balancer.java

@@ -186,7 +186,6 @@ public class Balancer {
   private final NameNodeConnector nnc;
   private final BalancingPolicy policy;
   private final double threshold;
-  private final static Random rnd = new Random();
   
   // all data node lists
   private Collection<Source> overUtilizedDatanodes
@@ -780,7 +779,7 @@ public class Balancer {
   /* Shuffle datanode array */
   static private void shuffleArray(DatanodeInfo[] datanodes) {
     for (int i=datanodes.length; i>1; i--) {
-      int randomIndex = rnd.nextInt(i);
+      int randomIndex = DFSUtil.getRandom().nextInt(i);
       DatanodeInfo tmp = datanodes[randomIndex];
       datanodes[randomIndex] = datanodes[i-1];
       datanodes[i-1] = tmp;

+ 2 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/balancer/NameNodeConnector.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -79,7 +79,7 @@ class NameNodeConnector {
       ) throws IOException {
     this.namenodeAddress = namenodeAddress;
     this.namenode = createNamenode(namenodeAddress, conf);
-    this.client = DFSClient.createNamenode(conf);
+    this.client = DFSUtil.createNamenode(conf);
     this.fs = FileSystem.get(NameNode.getUri(namenodeAddress), conf);
 
     final NamespaceInfo namespaceinfo = namenode.versionRequest();

+ 65 - 32
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlockManager.java

@@ -22,18 +22,21 @@ import java.io.PrintWriter;
 import java.util.ArrayList;
 import java.util.Collection;
 import java.util.Collections;
+import java.util.HashMap;
 import java.util.HashSet;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.TreeMap;
 import java.util.TreeSet;
 
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs.BlockReportIterator;
@@ -47,6 +50,7 @@ import org.apache.hadoop.hdfs.server.namenode.INode;
 import org.apache.hadoop.hdfs.server.namenode.INodeFile;
 import org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
+import org.apache.hadoop.net.Node;
 
 /**
  * Keeps information related to the blocks stored in the Hadoop cluster.
@@ -55,8 +59,9 @@ import org.apache.hadoop.hdfs.server.namenode.NameNode;
  */
 @InterfaceAudience.Private
 public class BlockManager {
-  // Default initial capacity and load factor of map
-  public static final int DEFAULT_INITIAL_MAP_CAPACITY = 16;
+  static final Log LOG = LogFactory.getLog(BlockManager.class);
+
+  /** Default load factor of map */
   public static final float DEFAULT_MAP_LOAD_FACTOR = 0.75f;
 
   private final FSNamesystem namesystem;
@@ -104,7 +109,7 @@ public class BlockManager {
   //
   // Store blocks-->datanodedescriptor(s) map of corrupt replicas
   //
-  CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
+  private final CorruptReplicasMap corruptReplicas = new CorruptReplicasMap();
 
   //
   // Keeps a Collection for every named machine containing
@@ -112,7 +117,7 @@ public class BlockManager {
   // on the machine in question.
   // Mapping: StorageID -> ArrayList<Block>
   //
-  Map<String, Collection<Block>> recentInvalidateSets =
+  private final Map<String, Collection<Block>> recentInvalidateSets =
     new TreeMap<String, Collection<Block>>();
 
   //
@@ -128,52 +133,41 @@ public class BlockManager {
   // Store set of Blocks that need to be replicated 1 or more times.
   // We also store pending replication-orders.
   //
-  public UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
-  private PendingReplicationBlocks pendingReplications;
+  public final UnderReplicatedBlocks neededReplications = new UnderReplicatedBlocks();
+  private final PendingReplicationBlocks pendingReplications;
 
   //  The maximum number of replicas allowed for a block
-  public int maxReplication;
+  public final int maxReplication;
   //  How many outgoing replication streams a given node should have at one time
   public int maxReplicationStreams;
   // Minimum copies needed or else write is disallowed
-  public int minReplication;
+  public final int minReplication;
   // Default number of replicas
-  public int defaultReplication;
+  public final int defaultReplication;
   // How many entries are returned by getCorruptInodes()
-  int maxCorruptFilesReturned;
+  final int maxCorruptFilesReturned;
   
   // variable to enable check for enough racks 
-  boolean shouldCheckForEnoughRacks = true;
+  final boolean shouldCheckForEnoughRacks;
 
   /**
    * Last block index used for replication work.
   */
   private int replIndex = 0;
-  Random r = new Random();
 
   // for block replicas placement
-  public BlockPlacementPolicy replicator;
+  public final BlockPlacementPolicy replicator;
 
   public BlockManager(FSNamesystem fsn, Configuration conf) throws IOException {
-    this(fsn, conf, DEFAULT_INITIAL_MAP_CAPACITY);
-  }
-  
-  BlockManager(FSNamesystem fsn, Configuration conf, int capacity)
-      throws IOException {
     namesystem = fsn;
+    datanodeManager = new DatanodeManager(fsn);
+
+    blocksMap = new BlocksMap(DEFAULT_MAP_LOAD_FACTOR);
+    replicator = BlockPlacementPolicy.getInstance(
+        conf, namesystem, datanodeManager.getNetworkTopology());
     pendingReplications = new PendingReplicationBlocks(conf.getInt(
       DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY,
       DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_DEFAULT) * 1000L);
-    setConfigurationParameters(conf);
-    blocksMap = new BlocksMap(capacity, DEFAULT_MAP_LOAD_FACTOR);
-    datanodeManager = new DatanodeManager(fsn);
-  }
-
-  void setConfigurationParameters(Configuration conf) throws IOException {
-    this.replicator = BlockPlacementPolicy.getInstance(
-                         conf,
-                         namesystem,
-                         namesystem.clusterMap);
 
     this.maxCorruptFilesReturned = conf.getInt(
       DFSConfigKeys.DFS_DEFAULT_MAX_CORRUPT_FILES_RETURNED_KEY,
@@ -541,6 +535,22 @@ public class BlockManager {
                             minReplication);
   }
 
+  /** Remove a datanode. */
+  public void removeDatanode(final DatanodeDescriptor node) {
+    final Iterator<? extends Block> it = node.getBlockIterator();
+    while(it.hasNext()) {
+      removeStoredBlock(it.next(), node);
+    }
+
+    node.resetBlocks();
+    removeFromInvalidates(node.getStorageID());
+    datanodeManager.getNetworkTopology().remove(node);
+
+    if (LOG.isDebugEnabled()) {
+      LOG.debug("remove datanode " + node.getName());
+    }
+  }
+  
   void removeFromInvalidates(String storageID, Block block) {
     Collection<Block> v = recentInvalidateSets.get(storageID);
     if (v != null && v.remove(block)) {
@@ -741,12 +751,12 @@ public class BlockManager {
     int remainingNodes = numOfNodes - nodesToProcess;
     if (nodesToProcess < remainingNodes) {
       for(int i=0; i<nodesToProcess; i++) {
-        int keyIndex = r.nextInt(numOfNodes-i)+i;
+        int keyIndex = DFSUtil.getRandom().nextInt(numOfNodes-i)+i;
         Collections.swap(keyArray, keyIndex, i); // swap to front
       }
     } else {
       for(int i=0; i<remainingNodes; i++) {
-        int keyIndex = r.nextInt(numOfNodes-i);
+        int keyIndex = DFSUtil.getRandom().nextInt(numOfNodes-i);
         Collections.swap(keyArray, keyIndex, numOfNodes-i-1); // swap to end
       }
     }
@@ -1001,6 +1011,29 @@ public class BlockManager {
     return true;
   }
 
+  /**
+   * Choose target datanodes according to the replication policy.
+   * @throws IOException if the number of targets < minimum replication.
+   * @see BlockPlacementPolicy#chooseTarget(String, int, DatanodeDescriptor, HashMap, long)
+   */
+  public DatanodeDescriptor[] chooseTarget(final String src,
+      final int numOfReplicas, final DatanodeDescriptor client,
+      final HashMap<Node, Node> excludedNodes,
+      final long blocksize) throws IOException {
+    // choose targets for the new block to be allocated.
+    final DatanodeDescriptor targets[] = replicator.chooseTarget(
+        src, numOfReplicas, client, excludedNodes, blocksize);
+    if (targets.length < minReplication) {
+      throw new IOException("File " + src + " could only be replicated to " +
+                            targets.length + " nodes, instead of " +
+                            minReplication + ". There are "
+                            + getDatanodeManager().getNetworkTopology().getNumOfLeaves()
+                            + " datanode(s) running but "+excludedNodes.size() +
+                            " node(s) are excluded in this operation.");
+    }
+    return targets;
+  }
+
   /**
   * Parse the data-nodes the block belongs to and choose one,
   * which will be the replication source.
@@ -1062,7 +1095,7 @@ public class BlockManager {
       // switch to a different node randomly
       // this to prevent from deterministically selecting the same node even
       // if the node failed to replicate the block on previous iterations
-      if(r.nextBoolean())
        srcNode = node;
+      if(DFSUtil.getRandom().nextBoolean())
        srcNode = node;
     }
     if(numReplicas != null)

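For orientation, a hedged sketch (not part of this commit) of how a caller inside the namenode might use the new BlockManager.chooseTarget(..) added above; the wrapper class and method are illustrative only:

// Illustrative sketch: delegating target selection and minReplication checking to BlockManager.
import java.io.IOException;
import java.util.HashMap;

import org.apache.hadoop.hdfs.server.blockmanagement.BlockManager;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
import org.apache.hadoop.net.Node;

public class ChooseTargetSketch {
  /** Pick replica targets for a new block of the given file. */
  public static DatanodeDescriptor[] pickTargets(BlockManager bm, String src,
      int replication, DatanodeDescriptor writer, long blockSize) throws IOException {
    // The new method wraps replicator.chooseTarget(..) and throws IOException
    // when fewer than minReplication targets can be found.
    return bm.chooseTarget(src, replication, writer, new HashMap<Node, Node>(), blockSize);
  }
}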
+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/BlocksMap.java

@@ -57,7 +57,7 @@ public class BlocksMap {
   
   private GSet<Block, BlockInfo> blocks;
 
-  BlocksMap(int initialCapacity, float loadFactor) {
+  BlocksMap(final float loadFactor) {
     this.capacity = computeCapacity();
     this.blocks = new LightWeightGSet<Block, BlockInfo>(capacity);
   }

+ 30 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/DatanodeManager.java

@@ -18,6 +18,8 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.io.IOException;
+import java.util.Arrays;
+import java.util.List;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -25,8 +27,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
+import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.util.Daemon;
 
 /**
@@ -39,6 +44,10 @@ public class DatanodeManager {
 
   final FSNamesystem namesystem;
 
+  /** Cluster network topology */
+  private final NetworkTopology networktopology = new NetworkTopology();
+
+  /** Host names to datanode descriptors mapping. */
   private final Host2NodesMap host2DatanodeMap = new Host2NodesMap();
   
   DatanodeManager(final FSNamesystem namesystem) {
@@ -60,6 +69,24 @@ public class DatanodeManager {
     if (decommissionthread != null) decommissionthread.interrupt();
   }
 
+  /** @return the network topology. */
+  public NetworkTopology getNetworkTopology() {
+    return networktopology;
+  }
+  
+  /** Sort the located blocks by the distance to the target host. */
+  public void sortLocatedBlocks(final String targethost,
+      final List<LocatedBlock> locatedblocks) {
+    //sort the blocks
+    final DatanodeDescriptor client = getDatanodeByHost(targethost);
+    for (LocatedBlock b : locatedblocks) {
+      networktopology.pseudoSortByDistance(client, b.getLocations());
+      
+      // Move decommissioned datanodes to the bottom
+      Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
+    }    
+  }
+  
   /** @return the datanode descriptor for the host. */
   public DatanodeDescriptor getDatanodeByHost(final String host) {
     return host2DatanodeMap.getDatanodeByHost(host);
@@ -74,10 +101,12 @@ public class DatanodeManager {
       host2DatanodeMap.remove(
           namesystem.datanodeMap.put(node.getStorageID(), node));
     }
+
     host2DatanodeMap.add(node);
+    networktopology.add(node);
 
     if (LOG.isDebugEnabled()) {
-      LOG.debug(getClass().getSimpleName() + ".unprotectedAddDatanode: "
+      LOG.debug(getClass().getSimpleName() + ".addDatanode: "
           + "node " + node.getName() + " is added to datanodeMap.");
     }
   }

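A hedged sketch of the topology accessors that move into DatanodeManager above; the wrapper class and method are illustrative, only the two public calls are from this patch:

// Illustrative sketch: topology and replica sorting now hang off DatanodeManager.
import java.util.List;

import org.apache.hadoop.hdfs.protocol.LocatedBlock;
import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeManager;

public class BlockSortingSketch {
  /** Order each block's replicas by network distance from the reading client. */
  public static void sortForClient(DatanodeManager dm, String clientHost,
      List<LocatedBlock> blocks) {
    // Replaces direct use of the topology that previously lived on FSNamesystem (clusterMap).
    dm.sortLocatedBlocks(clientHost, blocks);
    System.out.println("datanodes known to the topology: "
        + dm.getNetworkTopology().getNumOfLeaves());
  }
}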
+ 3 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/blockmanagement/Host2NodesMap.java

@@ -18,12 +18,12 @@
 package org.apache.hadoop.hdfs.server.blockmanagement;
 
 import java.util.HashMap;
-import java.util.Random;
 import java.util.concurrent.locks.ReadWriteLock;
 import java.util.concurrent.locks.ReentrantReadWriteLock;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.hdfs.DFSUtil;
 
 /** A map from host names to datanode descriptors. */
 @InterfaceAudience.Private
@@ -31,9 +31,8 @@ import org.apache.hadoop.classification.InterfaceStability;
 class Host2NodesMap {
   private HashMap<String, DatanodeDescriptor[]> map
     = new HashMap<String, DatanodeDescriptor[]>();
-  private Random r = new Random();
   private ReadWriteLock hostmapLock = new ReentrantReadWriteLock();
-                      
+
   /** Check if node is already in the map. */
   boolean contains(DatanodeDescriptor node) {
     if (node==null) {
@@ -151,7 +150,7 @@ class Host2NodesMap {
         return nodes[0];
       }
       // more than one node
-      return nodes[r.nextInt(nodes.length)];
+      return nodes[DFSUtil.getRandom().nextInt(nodes.length)];
     } finally {
       hostmapLock.readLock().unlock();
     }

+ 3 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/common/HdfsConstants.java

@@ -65,7 +65,7 @@ public interface HdfsConstants {
       case CHECKPOINT: 
         return NamenodeRole.CHECKPOINT;
       default:
-        return NamenodeRole.ACTIVE;
+        return NamenodeRole.NAMENODE;
       }
     }
     
@@ -89,10 +89,9 @@ public interface HdfsConstants {
    * Defines the NameNode role.
    */
   static public enum NamenodeRole {
-    ACTIVE    ("NameNode"),
+    NAMENODE  ("NameNode"),
     BACKUP    ("Backup Node"),
-    CHECKPOINT("Checkpoint Node"),
-    STANDBY   ("Standby Node");
+    CHECKPOINT("Checkpoint Node");
 
     private String description = null;
     private NamenodeRole(String arg) {this.description = arg;}

+ 1 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/common/JspHelper.java

@@ -31,7 +31,6 @@ import java.util.Arrays;
 import java.util.Collections;
 import java.util.Comparator;
 import java.util.HashMap;
-import java.util.Random;
 import java.util.TreeSet;
 
 import javax.servlet.ServletContext;
@@ -72,8 +71,6 @@ public class JspHelper {
                                               "=";
   private static final Log LOG = LogFactory.getLog(JspHelper.class);
 
-  static final Random rand = new Random();
-
   /** Private constructor for preventing creating JspHelper object. */
   private JspHelper() {} 
   
@@ -152,7 +149,7 @@ public class JspHelper {
       if (chosenNode == null) {
         do {
           if (doRandom) {
-            index = rand.nextInt(nodes.length);
+            index = DFSUtil.getRandom().nextInt(nodes.length);
           } else {
             index++;
           }

+ 2 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/BlockPoolSliceScanner.java

@@ -43,6 +43,7 @@ import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
@@ -99,8 +100,6 @@ class BlockPoolSliceScanner {
   
   private LogFileHandler verificationLog;
   
-  private Random random = new Random();
-  
   private DataTransferThrottler throttler = null;
   
   private static enum ScanType {
@@ -254,7 +253,7 @@ class BlockPoolSliceScanner {
     long period = Math.min(scanPeriod, 
                            Math.max(blockMap.size(),1) * 600 * 1000L);
     return System.currentTimeMillis() - scanPeriod + 
-           random.nextInt((int)period);    
+        DFSUtil.getRandom().nextInt((int)period);    
   }
 
   /** Adds block to list of blocks */

+ 2 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -71,7 +71,6 @@ import java.util.HashMap;
 import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
 import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
@@ -398,8 +397,6 @@ public class DataNode extends Configured
   /** Activated plug-ins. */
   private List<ServicePlugin> plugins;
   
-  private static final Random R = new Random();
-  
   // For InterDataNodeProtocol
   public Server ipcServer;
 
@@ -844,7 +841,7 @@ public class DataNode extends Configured
     void scheduleBlockReport(long delay) {
       if (delay > 0) { // send BR after random delay
         lastBlockReport = System.currentTimeMillis()
-        - ( blockReportInterval - R.nextInt((int)(delay)));
+        - ( blockReportInterval - DFSUtil.getRandom().nextInt((int)(delay)));
       } else { // send at next heartbeat
         lastBlockReport = lastHeartbeat - blockReportInterval;
       }
@@ -965,7 +962,7 @@ public class DataNode extends Configured
         // If we have sent the first block report, then wait a random
         // time before we start the periodic block reports.
         if (resetBlockReportTime) {
-          lastBlockReport = startTime - R.nextInt((int)(blockReportInterval));
+          lastBlockReport = startTime - DFSUtil.getRandom().nextInt((int)(blockReportInterval));
           resetBlockReportTime = false;
         } else {
           /* say the last block report was at 8:20:14. The current report
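Editor's note: the scheduleBlockReport arithmetic above is easy to misread, so here is a standalone sketch of the same computation with made-up values (the interval, delay and variable names are illustrative; plain java.util.Random stands in for DFSUtil.getRandom()):

    import java.util.Random;

    public class BlockReportJitterExample {
      public static void main(String[] args) {
        long blockReportInterval = 6L * 60 * 60 * 1000; // assume a 6 h interval
        long delay = 60000;                             // initial jitter window: 1 min
        Random random = new Random();                   // stands in for DFSUtil.getRandom()

        // Same trick as scheduleBlockReport(delay): pretend the last report happened
        // (interval - jitter) ms ago, so the next one is due within `delay` ms.
        long lastBlockReport = System.currentTimeMillis()
            - (blockReportInterval - random.nextInt((int) delay));
        long nextReportIn = lastBlockReport + blockReportInterval
            - System.currentTimeMillis();
        System.out.println("next block report due in ~" + nextReportIn + " ms");
      }
    }

Backdating lastBlockReport rather than scheduling a future timestamp keeps the periodic "is a report due yet?" check unchanged while still spreading the first reports of many DataNodes across the window.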

+ 2 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/DirectoryScanner.java

@@ -26,7 +26,6 @@ import java.util.LinkedList;
 import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
-import java.util.Random;
 import java.util.concurrent.Callable;
 import java.util.concurrent.ExecutorService;
 import java.util.concurrent.Executors;
@@ -41,11 +40,11 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.util.Daemon;
-import org.apache.hadoop.util.StringUtils;
 
 /**
  * Periodically scans the data directories for block and block metadata files.
@@ -240,8 +239,7 @@ public class DirectoryScanner implements Runnable {
 
   void start() {
     shouldRun = true;
-    Random rand = new Random();
-    long offset = rand.nextInt((int) (scanPeriodMsecs/1000L)) * 1000L; //msec
+    long offset = DFSUtil.getRandom().nextInt((int) (scanPeriodMsecs/1000L)) * 1000L; //msec
     long firstScanTime = System.currentTimeMillis() + offset;
     LOG.info("Periodic Directory Tree Verification scan starting at " 
         + firstScanTime + " with interval " + scanPeriodMsecs);

+ 9 - 11
hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -36,9 +36,8 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Random;
-import java.util.Set;
 import java.util.Map.Entry;
+import java.util.Set;
 
 import javax.management.NotCompliantMBeanException;
 import javax.management.ObjectName;
@@ -50,24 +49,25 @@ import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.DU;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.RecoveryInProgressException;
+import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
+import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
 import org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.ReplicaRecoveryInfo;
-import org.apache.hadoop.hdfs.server.protocol.BlockRecoveryCommand.RecoveringBlock;
+import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.metrics2.util.MBeans;
 import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.DiskChecker;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.util.ReflectionUtils;
-import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
-import org.apache.hadoop.io.IOUtils;
 
 /**************************************************
  * FSDataset manages a set of data blocks.  Each block
@@ -136,7 +136,7 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
             
       if (lastChildIdx < 0 && resetIdx) {
         //reset so that all children will be checked
-        lastChildIdx = random.nextInt(children.length);              
+        lastChildIdx = DFSUtil.getRandom().nextInt(children.length);              
       }
             
       if (lastChildIdx >= 0 && children != null) {
@@ -164,7 +164,7 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
       }
             
       //now pick a child randomly for creating a new set of subdirs.
-      lastChildIdx = random.nextInt(children.length);
+      lastChildIdx = DFSUtil.getRandom().nextInt(children.length);
       return children[ lastChildIdx ].addBlock(b, src, true, false); 
     }
 
@@ -1122,7 +1122,6 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
   final FSVolumeSet volumes;
   private final int maxBlocksPerDir;
   final ReplicasMap volumeMap;
-  static final Random random = new Random();
   final FSDatasetAsyncDiskService asyncDiskService;
   private final int validVolsRequired;
 
@@ -2178,7 +2177,6 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
   }
 
   private ObjectName mbeanName;
-  private Random rand = new Random();
   
   /**
    * Register the FSDataset MBean using the name
@@ -2191,7 +2189,7 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
     StandardMBean bean;
     String storageName;
     if (storageId == null || storageId.equals("")) {// Temp fix for the uninitialized storage
-      storageName = "UndefinedStorageId" + rand.nextInt();
+      storageName = "UndefinedStorageId" + DFSUtil.getRandom().nextInt();
     } else {
       storageName = storageId;
     }

+ 4 - 4
hdfs/src/java/org/apache/hadoop/hdfs/server/datanode/metrics/DataNodeMetrics.java

@@ -17,10 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.metrics;
 
-import java.util.Random;
+import static org.apache.hadoop.metrics2.impl.MsInfo.SessionId;
+
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.metrics2.MetricsSystem;
 import org.apache.hadoop.metrics2.annotation.Metric;
 import org.apache.hadoop.metrics2.annotation.Metrics;
@@ -29,7 +31,6 @@ import org.apache.hadoop.metrics2.lib.MetricsRegistry;
 import org.apache.hadoop.metrics2.lib.MutableCounterLong;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.metrics2.source.JvmMetrics;
-import static org.apache.hadoop.metrics2.impl.MsInfo.*;
 
 /**
  *
@@ -72,7 +73,6 @@ public class DataNodeMetrics {
 
   final MetricsRegistry registry = new MetricsRegistry("datanode");
   final String name;
-  static final Random rng = new Random();
 
   public DataNodeMetrics(String name, String sessionId) {
     this.name = name;
@@ -84,7 +84,7 @@ public class DataNodeMetrics {
     MetricsSystem ms = DefaultMetricsSystem.instance();
     JvmMetrics.create("DataNode", sessionId, ms);
     String name = "DataNodeActivity-"+ (dnName.isEmpty()
-        ? "UndefinedDataNodeName"+ rng.nextInt() : dnName.replace(':', '-'));
+        ? "UndefinedDataNodeName"+ DFSUtil.getRandom().nextInt() : dnName.replace(':', '-'));
     return ms.register(name, null, new DataNodeMetrics(name, sessionId));
   }
 

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupNode.java

@@ -331,7 +331,7 @@ public class BackupNode extends NameNode implements BackupNodeProtocol {
     String msg = null;
     if(nnReg == null) // consider as a rejection
       msg = "Registration rejected by " + nnRpcAddress;
-    else if(!nnReg.isRole(NamenodeRole.ACTIVE)) {
+    else if(!nnReg.isRole(NamenodeRole.NAMENODE)) {
       msg = "Name-node " + nnRpcAddress + " is not active";
     }
     if(msg != null) {

+ 10 - 10
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/ClusterJspHelper.java

@@ -584,12 +584,12 @@ class ClusterJspHelper {
         toXmlItemBlock(doc, "Blocks", Long.toString(nn.blocksCount));
         toXmlItemBlock(doc, "Missing Blocks",
             Long.toString(nn.missingBlocksCount));
-        toXmlItemBlock(doc, "Live Datanode (Decommissioned)",
-            Integer.toString(nn.liveDatanodeCount) + " ("
-                + Integer.toString(nn.liveDecomCount) + ")");
-        toXmlItemBlock(doc, "Dead Datanode (Decommissioned)",
-            Integer.toString(nn.deadDatanodeCount) + " ("
-                + Integer.toString(nn.deadDecomCount) + ")");
+        toXmlItemBlockWithLink(doc, nn.liveDatanodeCount + " (" +
+          nn.liveDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=LIVE",
+          "Live Datanode (Decommissioned)");
+        toXmlItemBlockWithLink(doc, nn.deadDatanodeCount + " (" +
+          nn.deadDecomCount + ")", nn.httpAddress+"/dfsnodelist.jsp?whatNodes=DEAD"
+          , "Dead Datanode (Decommissioned)");
         doc.endTag(); // node
       }
       doc.endTag(); // namenodes
@@ -812,11 +812,11 @@ class ClusterJspHelper {
    * Generate a XML block as such, <item label="Node" value="hostname"
    * link="http://hostname:50070" />
    */
-  private static void toXmlItemBlockWithLink(XMLOutputter doc, String host,
-      String url, String nodetag) throws IOException {
+  private static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
+      String url, String label) throws IOException {
     doc.startTag("item");
-    doc.attribute("label", nodetag);
-    doc.attribute("value", host);
+    doc.attribute("label", label);
+    doc.attribute("value", value);
     doc.attribute("link", "http://" + url);
     doc.endTag(); // item
   }
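Editor's note: a hedged usage sketch of the generalized helper, standalone for illustration only. The helper body is copied from the hunk above; the host name and node counts are made-up sample values, and the wrapper class is hypothetical:

    import java.io.IOException;
    import java.io.PrintWriter;

    import org.znerd.xmlenc.XMLOutputter;

    public class ItemBlockExample {
      // Re-implementation of the private helper above, for demonstration only.
      static void toXmlItemBlockWithLink(XMLOutputter doc, String value,
          String url, String label) throws IOException {
        doc.startTag("item");
        doc.attribute("label", label);
        doc.attribute("value", value);
        doc.attribute("link", "http://" + url);
        doc.endTag(); // item
      }

      public static void main(String[] args) throws IOException {
        XMLOutputter doc = new XMLOutputter(new PrintWriter(System.out, true), "UTF-8");
        doc.declaration();
        toXmlItemBlockWithLink(doc, 10 + " (" + 2 + ")",
            "nn1:50070" + "/dfsnodelist.jsp?whatNodes=LIVE",
            "Live Datanode (Decommissioned)");
        doc.endDocument();
        // After the XML declaration this prints something like:
        //   <item label="Live Datanode (Decommissioned)" value="10 (2)"
        //         link="http://nn1:50070/dfsnodelist.jsp?whatNodes=LIVE"/>
      }
    }

The net effect of the change is that the live/dead counts on the cluster page become links into each NameNode's dfsnodelist.jsp instead of plain text.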

+ 3 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/DfsServlet.java

@@ -30,12 +30,12 @@ import javax.servlet.http.HttpServletResponse;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSUtil;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.znerd.xmlenc.XMLOutputter;
@@ -82,7 +82,7 @@ abstract class DfsServlet extends HttpServlet {
     InetSocketAddress nnAddr = (InetSocketAddress)context.getAttribute("name.node.address");
     Configuration conf = new HdfsConfiguration(
         (Configuration)context.getAttribute(JspHelper.CURRENT_CONF));
-    return DFSClient.createNamenode(nnAddr, conf);
+    return DFSUtil.createNamenode(nnAddr, conf);
   }
 
   /** Create a URI for redirecting request to a datanode */

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -941,7 +941,7 @@ public class FSImage implements Closeable {
       msg = "Name node " + bnReg.getAddress()
             + " has incompatible namespace id: " + bnReg.getNamespaceID()
             + " expected: " + storage.getNamespaceID();
-    else if(bnReg.isRole(NamenodeRole.ACTIVE))
+    else if(bnReg.isRole(NamenodeRole.NAMENODE))
       msg = "Name node " + bnReg.getAddress()
             + " role " + bnReg.getRole() + ": checkpoint is not allowed.";
     else if(bnReg.getLayoutVersion() < storage.getLayoutVersion()

+ 19 - 61
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -45,7 +45,6 @@ import java.util.List;
 import java.util.Map;
 import java.util.Map.Entry;
 import java.util.NavigableMap;
-import java.util.Random;
 import java.util.Set;
 import java.util.TreeMap;
 import java.util.concurrent.TimeUnit;
@@ -140,7 +139,6 @@ import org.apache.hadoop.net.CachedDNSToSwitchMapping;
 import org.apache.hadoop.net.DNSToSwitchMapping;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
-import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.net.ScriptBasedMapping;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -242,7 +240,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   // Stores the correct file name hierarchy
   //
   public FSDirectory dir;
-  public BlockManager blockManager;
+  BlockManager blockManager;
   
   // Block pool ID used by this namenode
   String blockPoolId;
@@ -271,8 +269,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
   public final NavigableMap<String, DatanodeDescriptor> datanodeMap = 
     new TreeMap<String, DatanodeDescriptor>();
 
-  Random r = new Random();
-
   /**
    * Stores a set of DatanodeDescriptor objects.
    * This is a subset of {@link #datanodeMap}, containing nodes that are 
@@ -320,8 +316,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
 
   private volatile SafeModeInfo safeMode;  // safe mode information
   
-  /** datanode network toplogy */
-  public NetworkTopology clusterMap = new NetworkTopology();
   private DNSToSwitchMapping dnsToSwitchMapping;
 
   private HostsFileReader hostsReader; 
@@ -742,7 +736,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
        return new BlocksWithLocations(new BlockWithLocations[0]);
      }
      Iterator<BlockInfo> iter = node.getBlockIterator();
-      int startBlock = r.nextInt(numBlocks); // starting from a random block
+      int startBlock = DFSUtil.getRandom().nextInt(numBlocks); // starting from a random block
      // skip blocks
      for(int i=0; i<startBlock; i++) {
        iter.next();
@@ -878,15 +872,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
      FileNotFoundException, UnresolvedLinkException, IOException {
    LocatedBlocks blocks = getBlockLocations(src, offset, length, true, true);
    if (blocks != null) {
-      //sort the blocks
-      final DatanodeDescriptor client = 
-          blockManager.getDatanodeManager().getDatanodeByHost(clientMachine);
-      for (LocatedBlock b : blocks.getLocatedBlocks()) {
-        clusterMap.pseudoSortByDistance(client, b.getLocations());
-        
-        // Move decommissioned datanodes to the bottom
-        Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
-      }
+      blockManager.getDatanodeManager().sortLocatedBlocks(
+          clientMachine, blocks.getLocatedBlocks());
    }
    return blocks;
  }
@@ -1776,16 +1763,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    }
 
    // choose targets for the new block to be allocated.
-    DatanodeDescriptor targets[] = blockManager.replicator.chooseTarget(
+    final DatanodeDescriptor targets[] = blockManager.chooseTarget(
        src, replication, clientNode, excludedNodes, blockSize);
-    if (targets.length < blockManager.minReplication) {
-      throw new IOException("File " + src + " could only be replicated to " +
-                            targets.length + " nodes, instead of " +
-                            blockManager.minReplication + ". There are "
-                            +clusterMap.getNumOfLeaves()+" datanode(s) running"
-                            +" but "+excludedNodes.size() +
-                            " node(s) are excluded in this operation.");
-    }
 
    // Allocate a new block and record it in the INode. 
    writeLock();
@@ -1996,8 +1975,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
      blockManager.checkReplication(pendingBlocks[i], numExpectedReplicas);
    }
  }
-
-  static Random randBlockId = new Random();
    
  /**
   * Allocate a block at the given pending filename
@@ -2011,9 +1988,9 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
  private Block allocateBlock(String src, INode[] inodes,
      DatanodeDescriptor targets[]) throws QuotaExceededException {
    assert hasWriteLock();
-    Block b = new Block(FSNamesystem.randBlockId.nextLong(), 0, 0); 
+    Block b = new Block(DFSUtil.getRandom().nextLong(), 0, 0); 
    while(isValidBlock(b)) {
-      b.setBlockId(FSNamesystem.randBlockId.nextLong());
+      b.setBlockId(DFSUtil.getRandom().nextLong());
    }
    b.setGenerationStamp(getGenerationStamp());
    b = dir.addBlock(src, inodes, b, targets);
@@ -2883,14 +2860,14 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
                                      nodeReg.getStorageID());
      }
      // update cluster map
-      clusterMap.remove(nodeS);
+      blockManager.getDatanodeManager().getNetworkTopology().remove(nodeS);
      nodeS.updateRegInfo(nodeReg);
      nodeS.setHostName(hostName);
      nodeS.setDisallowed(false); // Node is in the include list
      
      // resolve network location
      resolveNetworkLocation(nodeS);
-      clusterMap.add(nodeS);
+      blockManager.getDatanodeManager().getNetworkTopology().add(nodeS);
        
      // also treat the registration message as a heartbeat
      synchronized(heartbeats) {
@@ -2921,7 +2898,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
      = new DatanodeDescriptor(nodeReg, NetworkTopology.DEFAULT_RACK, hostName);
    resolveNetworkLocation(nodeDescr);
    blockManager.getDatanodeManager().addDatanode(nodeDescr);
-    clusterMap.add(nodeDescr);
    checkDecommissioning(nodeDescr, dnAddress);
    
    // also treat the registration message as a heartbeat
@@ -2984,7 +2960,7 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
  private String newStorageID() {
    String newID = null;
    while(newID == null) {
-      newID = "DS" + Integer.toString(r.nextInt());
+      newID = "DS" + Integer.toString(DFSUtil.getRandom().nextInt());
      if (datanodeMap.get(newID) != null)
        newID = null;
    }
@@ -3338,27 +3314,11 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
      }
    }
 
-    Iterator<? extends Block> it = nodeInfo.getBlockIterator();
-    while(it.hasNext()) {
-      blockManager.removeStoredBlock(it.next(), nodeInfo);
-    }
-    unprotectedRemoveDatanode(nodeInfo);
-    clusterMap.remove(nodeInfo);
+    blockManager.removeDatanode(nodeInfo);
 
    checkSafeMode();
  }
 
-  void unprotectedRemoveDatanode(DatanodeDescriptor nodeDescr) {
-    assert hasWriteLock();
-    nodeDescr.resetBlocks();
-    blockManager.removeFromInvalidates(nodeDescr.getStorageID());
-    if(NameNode.stateChangeLog.isDebugEnabled()) {
-      NameNode.stateChangeLog.debug(
-          "BLOCK* NameSystem.unprotectedRemoveDatanode: "
-          + nodeDescr.getName() + " is out of service now.");
-    }
-  }
-
  FSImage getFSImage() {
    return dir.fsImage;
  }
@@ -4106,14 +4066,6 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    return node;
  }
 
-  /** Choose a random datanode
-   * 
-   * @return a randomly chosen datanode
-   */
-  DatanodeDescriptor getRandomDatanode() {
-    return (DatanodeDescriptor)clusterMap.chooseRandom(NodeBase.ROOT);
-  }
-
  /**
   * SafeModeInfo contains information related to the safe mode.
   * <p>
@@ -4280,9 +4232,10 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
      }
      reached = -1;
      safeMode = null;
+      final NetworkTopology nt = blockManager.getDatanodeManager().getNetworkTopology();
      NameNode.stateChangeLog.info("STATE* Network topology has "
-                                   +clusterMap.getNumOfRacks()+" racks and "
-                                   +clusterMap.getNumOfLeaves()+ " datanodes");
+          + nt.getNumOfRacks() + " racks and "
+          + nt.getNumOfLeaves() + " datanodes");
      NameNode.stateChangeLog.info("STATE* UnderReplicatedBlocks has "
                                   +blockManager.neededReplications.size()+" blocks");
    }
@@ -5851,6 +5804,11 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean,
    return blockPoolId;
  }
 
+  /** @return the block manager. */
+  public BlockManager getBlockManager() {
+    return blockManager;
+  }
+
  /**
   * Remove an already decommissioned data node who is neither in include nor
   * exclude hosts lists from the the list of live or dead nodes.  This is used
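Editor's note: the getBlockLocations hunk above replaces FSNamesystem's inline sorting loop with a call to DatanodeManager.sortLocatedBlocks, whose definition lives in the DatanodeManager changes not reproduced here. A plausible shape for it, reconstructed purely from the lines removed above and written as a standalone static helper for illustration, would be:

    import java.util.Arrays;
    import java.util.List;

    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.server.blockmanagement.DatanodeDescriptor;
    import org.apache.hadoop.net.NetworkTopology;

    public class SortLocatedBlocksSketch {
      /**
       * Sketch of what DatanodeManager.sortLocatedBlocks presumably does,
       * inferred from the FSNamesystem code removed in the hunk above.
       */
      static void sortLocatedBlocks(NetworkTopology topology,
          DatanodeDescriptor client, List<LocatedBlock> locatedBlocks) {
        for (LocatedBlock b : locatedBlocks) {
          // closest replicas first ...
          topology.pseudoSortByDistance(client, b.getLocations());
          // ... and decommissioned datanodes last
          Arrays.sort(b.getLocations(), DFSUtil.DECOM_COMPARATOR);
        }
      }
    }

Moving this logic behind DatanodeManager is consistent with the rest of the merge, which steadily removes FSNamesystem's direct references to clusterMap in favor of blockManager.getDatanodeManager().getNetworkTopology().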

+ 3 - 3
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -32,14 +32,14 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
 import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.DatanodeJspHelper;
-import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.znerd.xmlenc.XMLOutputter;
@@ -61,7 +61,7 @@ public class FileChecksumServlets {
       (Configuration) context.getAttribute(JspHelper.CURRENT_CONF);
       final UserGroupInformation ugi = getUGI(request, conf);
       final NameNode namenode = (NameNode)context.getAttribute("name.node");
-      final DatanodeID datanode = namenode.getNamesystem().getRandomDatanode();
+      final DatanodeID datanode = NamenodeJspHelper.getRandomDatanode(namenode);
       try {
         final URI uri = createRedirectUri("/getFileChecksum", ugi, datanode, 
                                           request, namenode); 

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileDataServlet.java

@@ -86,7 +86,7 @@ public class FileDataServlet extends DfsServlet {
     if (i.getLen() == 0 || blks.getLocatedBlocks().size() <= 0) {
       // pick a random datanode
       NameNode nn = (NameNode)getServletContext().getAttribute("name.node");
-      return nn.getNamesystem().getRandomDatanode();
+      return NamenodeJspHelper.getRandomDatanode(nn);
     }
     return JspHelper.bestNode(blks);
   }

+ 2 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

@@ -66,7 +66,8 @@ public class FsckServlet extends DfsServlet {
             namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
           final short minReplication = namesystem.getMinReplication();
 
-          new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
+          new NamenodeFsck(conf, nn,
+              NamenodeJspHelper.getNetworkTopology(nn), pmap, out,
               totalDatanodes, minReplication, remoteAddress).fsck();
           
           return null;

+ 9 - 15
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorage.java

@@ -17,13 +17,12 @@
  */
 package org.apache.hadoop.hdfs.server.namenode;
 
-import static org.apache.hadoop.hdfs.server.common.Util.now;
-
 import java.io.BufferedReader;
+import java.io.Closeable;
 import java.io.File;
 import java.io.FileReader;
 import java.io.IOException;
-import java.io.Closeable;
+import java.io.RandomAccessFile;
 import java.io.OutputStream;
 import java.net.URI;
 import java.net.UnknownHostException;
@@ -34,28 +33,26 @@ import java.util.Collection;
 import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
-import java.util.Random;
 import java.util.Properties;
 import java.util.UUID;
-import java.io.RandomAccessFile;
 import java.util.concurrent.CopyOnWriteArrayList;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion;
 import org.apache.hadoop.hdfs.protocol.LayoutVersion.Feature;
-import org.apache.hadoop.hdfs.server.common.Storage;
-import org.apache.hadoop.hdfs.server.common.UpgradeManager;
-import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.common.InconsistentFSStateException;
-
+import org.apache.hadoop.hdfs.server.common.Storage;
+import org.apache.hadoop.hdfs.server.common.UpgradeManager;
+import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.namenode.JournalStream.JournalType;
 import org.apache.hadoop.hdfs.util.AtomicFileOutputStream;
-import org.apache.hadoop.conf.Configuration;
 
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.net.DNS;
@@ -530,11 +527,9 @@ public class NNStorage extends Storage implements Closeable {
    * @return new namespaceID
    */
   private int newNamespaceID() {
-    Random r = new Random();
-    r.setSeed(now());
     int newID = 0;
     while(newID == 0)
-      newID = r.nextInt(0x7FFFFFFF);  // use 31 bits only
+      newID = DFSUtil.getRandom().nextInt(0x7FFFFFFF);  // use 31 bits only
     return newID;
   }
 
@@ -966,9 +961,8 @@ public class NNStorage extends Storage implements Closeable {
     try {
       rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE);
     } catch (NoSuchAlgorithmException e) {
-      final Random R = new Random();
       LOG.warn("Could not use SecureRandom");
-      rand = R.nextInt(Integer.MAX_VALUE);
+      rand = DFSUtil.getRandom().nextInt(Integer.MAX_VALUE);
     }
     String bpid = "BP-" + rand + "-"+ ip + "-" + System.currentTimeMillis();
     return bpid;

+ 20 - 22
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -90,7 +90,6 @@ import org.apache.hadoop.ipc.RPC;
 import org.apache.hadoop.ipc.Server;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.Groups;
@@ -111,35 +110,38 @@ import org.apache.hadoop.util.StringUtils;
  * NameNode serves as both directory namespace manager and
  * "inode table" for the Hadoop DFS.  There is a single NameNode
  * running in any DFS deployment.  (Well, except when there
- * is a second backup/failover NameNode.)
+ * is a second backup/failover NameNode, or when using federated NameNodes.)
  *
  * The NameNode controls two critical tables:
  *   1)  filename->blocksequence (namespace)
  *   2)  block->machinelist ("inodes")
  *
  * The first table is stored on disk and is very precious.
- * The second table is rebuilt every time the NameNode comes
- * up.
+ * The second table is rebuilt every time the NameNode comes up.
  *
  * 'NameNode' refers to both this class as well as the 'NameNode server'.
  * The 'FSNamesystem' class actually performs most of the filesystem
  * management.  The majority of the 'NameNode' class itself is concerned
- * with exposing the IPC interface and the http server to the outside world,
+ * with exposing the IPC interface and the HTTP server to the outside world,
  * plus some configuration management.
 *
- * NameNode implements the ClientProtocol interface, which allows
- * clients to ask for DFS services.  ClientProtocol is not
- * designed for direct use by authors of DFS client code.  End-users
- * should instead use the org.apache.nutch.hadoop.fs.FileSystem class.
+ * NameNode implements the
+ * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} interface, which
+ * allows clients to ask for DFS services.
+ * {@link org.apache.hadoop.hdfs.protocol.ClientProtocol} is not designed for
+ * direct use by authors of DFS client code.  End-users should instead use the
+ * {@link org.apache.hadoop.fs.FileSystem} class.
 *
- * NameNode also implements the DatanodeProtocol interface, used by
- * DataNode programs that actually store DFS data blocks.  These
+ * NameNode also implements the
+ * {@link org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol} interface,
+ * used by DataNodes that actually store DFS data blocks.  These
  * methods are invoked repeatedly and automatically by all the
  * DataNodes in a DFS deployment.
 *
- * NameNode also implements the NamenodeProtocol interface, used by
- * secondary namenodes or rebalancing processes to get partial namenode's
- * state, for example partial blocksMap etc.
+ * NameNode also implements the
+ * {@link org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol} interface,
+ * used by secondary namenodes or rebalancing processes to get partial
+ * NameNode state, for example partial blocksMap etc.
  **********************************************************/
 @InterfaceAudience.Private
 public class NameNode implements NamenodeProtocols, FSConstants {
@@ -479,7 +481,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
    * Activate name-node servers and threads.
    */
   void activate(Configuration conf) throws IOException {
-    if ((isRole(NamenodeRole.ACTIVE))
+    if ((isRole(NamenodeRole.NAMENODE))
         && (UserGroupInformation.isSecurityEnabled())) {
       namesystem.activateSecretManager();
     }
@@ -645,7 +647,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
    * @throws IOException
    */
   public NameNode(Configuration conf) throws IOException {
-    this(conf, NamenodeRole.ACTIVE);
+    this(conf, NamenodeRole.NAMENODE);
   }
 
   protected NameNode(Configuration conf, NamenodeRole role) 
@@ -752,7 +754,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   public NamenodeCommand startCheckpoint(NamenodeRegistration registration)
   throws IOException {
     verifyRequest(registration);
-    if(!isRole(NamenodeRole.ACTIVE))
+    if(!isRole(NamenodeRole.NAMENODE))
       throw new IOException("Only an ACTIVE node can invoke startCheckpoint.");
     return namesystem.startCheckpoint(registration, setRegistration());
   }
@@ -761,7 +763,7 @@ public class NameNode implements NamenodeProtocols, FSConstants {
   public void endCheckpoint(NamenodeRegistration registration,
                             CheckpointSignature sig) throws IOException {
     verifyRequest(registration);
-    if(!isRole(NamenodeRole.ACTIVE))
+    if(!isRole(NamenodeRole.NAMENODE))
       throw new IOException("Only an ACTIVE node can invoke endCheckpoint.");
     namesystem.endCheckpoint(registration, sig);
   }
@@ -1391,10 +1393,6 @@ public class NameNode implements NamenodeProtocols, FSConstants {
     return httpAddress;
   }
 
-  NetworkTopology getNetworkTopology() {
-    return this.namesystem.clusterMap;
-  }
-
   /**
    * Verify that configured directories exist, then
    * Interactively confirm that formatting is desired 

+ 2 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -39,6 +39,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.BlockReader;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.DirectoryListing;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -553,7 +554,6 @@ public class NamenodeFsck {
    * Pick the best node from which to stream the data.
    * That's the local one, if available.
    */
-  Random r = new Random();
   private DatanodeInfo bestNode(DFSClient dfs, DatanodeInfo[] nodes,
                                 TreeSet<DatanodeInfo> deadNodes) throws IOException {
     if ((nodes == null) ||
@@ -562,7 +562,7 @@ public class NamenodeFsck {
     }
     DatanodeInfo chosenNode;
     do {
-      chosenNode = nodes[r.nextInt(nodes.length)];
+      chosenNode = nodes[DFSUtil.getRandom().nextInt(nodes.length)];
     } while (deadNodes.contains(chosenNode));
     return chosenNode;
   }

+ 15 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NamenodeJspHelper.java

@@ -48,6 +48,8 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.io.Text;
+import org.apache.hadoop.net.NetworkTopology;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.util.ServletUtil;
@@ -368,13 +370,25 @@ class NamenodeJspHelper {
     return token == null ? null : token.encodeToUrlString();
   }
 
+  /** @return the network topology. */
+  static NetworkTopology getNetworkTopology(final NameNode namenode) {
+    return namenode.getNamesystem().getBlockManager().getDatanodeManager(
+        ).getNetworkTopology();
+  }
+
+  /** @return a randomly chosen datanode. */
+  static DatanodeDescriptor getRandomDatanode(final NameNode namenode) {
+    return (DatanodeDescriptor)getNetworkTopology(namenode).chooseRandom(
+        NodeBase.ROOT);
+  }
+  
   static void redirectToRandomDataNode(ServletContext context,
       HttpServletRequest request, HttpServletResponse resp) throws IOException,
       InterruptedException {
     final NameNode nn = (NameNode) context.getAttribute("name.node");
     final Configuration conf = (Configuration) context
         .getAttribute(JspHelper.CURRENT_CONF);
-    final DatanodeID datanode = nn.getNamesystem().getRandomDatanode();
+    final DatanodeID datanode = getRandomDatanode(nn);
     UserGroupInformation ugi = JspHelper.getUGI(context, request, conf);
     String tokenString = getDelegationToken(nn, request, conf, ugi);
     // if the user is defined, get a delegation token and stringify it

+ 8 - 12
hdfs/src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -34,7 +34,6 @@ import org.apache.hadoop.fs.FsStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
@@ -95,7 +94,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -140,7 +139,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.quota = Long.parseLong(parameters.remove(0));
       this.args = parameters.toArray(new String[parameters.size()]);
@@ -180,7 +179,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     ClearSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 1, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(1, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       this.args = parameters.toArray(new String[parameters.size()]);
     }
@@ -228,7 +227,7 @@ public class DFSAdmin extends FsShell {
     /** Constructor */
     SetSpaceQuotaCommand(String[] args, int pos, FileSystem fs) {
       super(fs);
-      CommandFormat c = new CommandFormat(NAME, 2, Integer.MAX_VALUE);
+      CommandFormat c = new CommandFormat(2, Integer.MAX_VALUE);
       List<String> parameters = c.parse(args, pos);
       String str = parameters.remove(0).trim();
       quota = StringUtils.TraditionalBinaryPrefix.string2long(str);
@@ -327,10 +326,8 @@ public class DFSAdmin extends FsShell {
 
       System.out.println("-------------------------------------------------");
       
-      DatanodeInfo[] live = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.LIVE);
-      DatanodeInfo[] dead = dfs.getClient().datanodeReport(
-                                                   DatanodeReportType.DEAD);
+      DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
+      DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
       System.out.println("Datanodes available: " + live.length +
                          " (" + (live.length + dead.length) + " total, " + 
                          dead.length + " dead)\n");
@@ -691,9 +688,8 @@ public class DFSAdmin extends FsShell {
    */
   public int printTopology() throws IOException {
       DistributedFileSystem dfs = getDFS();
-      DFSClient client = dfs.getClient();
-      DatanodeInfo[] report = client.datanodeReport(DatanodeReportType.ALL);
-      
+      final DatanodeInfo[] report = dfs.getDataNodeStats();
+
       // Build a map of rack -> nodes from the datanode report
       HashMap<String, TreeSet<String> > tree = new HashMap<String, TreeSet<String>>();
       for(DatanodeInfo dni : report) {

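Aside (illustrative only): the admin report above now goes through DistributedFileSystem.getDataNodeStats instead of reaching into DFSClient; a compilable sketch of that pattern, with the summary string mirroring the output format in the hunk.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DistributedFileSystem;
    import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
    import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;

    class DatanodeReportSketch {
      // Ask the filesystem for live and dead datanodes and format a one-line summary.
      static String summarize(DistributedFileSystem dfs) throws IOException {
        DatanodeInfo[] live = dfs.getDataNodeStats(DatanodeReportType.LIVE);
        DatanodeInfo[] dead = dfs.getDataNodeStats(DatanodeReportType.DEAD);
        return "Datanodes available: " + live.length
            + " (" + (live.length + dead.length) + " total, " + dead.length + " dead)";
      }
    }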
+ 5 - 2
hdfs/src/java/org/apache/hadoop/hdfs/DFSClientAdapter.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSClientAdapter.java

@@ -23,10 +23,13 @@ import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
 
 public class DFSClientAdapter {
+  public static DFSClient getDFSClient(DistributedFileSystem dfs) {
+    return dfs.dfs;
+  }
   
-  public static void stopLeaseRenewer(DFSClient dfsClient) throws IOException {
+  public static void stopLeaseRenewer(DistributedFileSystem dfs) throws IOException {
     try {
-      dfsClient.leaserenewer.interruptAndJoin();
+      dfs.dfs.leaserenewer.interruptAndJoin();
     } catch (InterruptedException e) {
       throw new IOException(e);
     }

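Side note (sketch, not in the patch): with the adapter relocated into the test tree, test code reaches the package-private DFSClient like this; both static methods are the ones defined in the hunk above.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.DFSClient;
    import org.apache.hadoop.hdfs.DFSClientAdapter;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    class AdapterUsageSketch {
      static DFSClient clientBehind(DistributedFileSystem dfs) {
        // Unwrap the DFSClient that backs the public filesystem object.
        return DFSClientAdapter.getDFSClient(dfs);
      }

      static void freezeLeases(DistributedFileSystem dfs) throws IOException {
        // Stop the background lease renewer so lease-expiry paths can be exercised.
        DFSClientAdapter.stopLeaseRenewer(dfs);
      }
    }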
+ 0 - 10
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/DFSTestUtil.java

@@ -52,8 +52,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FileSystem.Statistics;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.hdfs.DFSClient.DFSDataInputStream;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
-import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
@@ -522,14 +520,6 @@ public class DFSTestUtil {
       FSDataOutputStream out) {
     return ((DFSOutputStream) out.getWrappedStream()).getBlockToken();
   }
-  
-  public static ClientDatanodeProtocol createClientDatanodeProtocolProxy(
-      DatanodeID datanodeid, Configuration conf, int socketTimeout,
-      LocatedBlock locatedBlock)
-      throws IOException {
-    return DFSClient.createClientDatanodeProtocolProxy(
-        datanodeid, conf, socketTimeout, locatedBlock);
-  }
 
   static void setLogLevel2All(org.apache.commons.logging.Log log) {
     ((org.apache.commons.logging.impl.Log4JLogger)log

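For orientation (hypothetical sketch): with the DFSTestUtil wrapper above removed, callers build the datanode proxy directly; the four-argument DFSUtil.createClientDatanodeProtocolProxy call mirrors the TestBlockToken change later in this diff, and the parameter order follows the deleted wrapper.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
    import org.apache.hadoop.hdfs.protocol.DatanodeID;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;

    class DatanodeProxySketch {
      // Build a ClientDatanodeProtocol proxy to one datanode; the located block
      // supplies the block access token used for the connection.
      static ClientDatanodeProtocol proxyFor(DatanodeID dn, Configuration conf,
          int socketTimeout, LocatedBlock block) throws IOException {
        return DFSUtil.createClientDatanodeProtocolProxy(dn, conf, socketTimeout, block);
      }
    }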
+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java

@@ -71,7 +71,7 @@ public class TestAbandonBlock {
     fout.hflush();
 
     // Now abandon the last block
-    DFSClient dfsclient = ((DistributedFileSystem)fs).getClient();
+    DFSClient dfsclient = DFSClientAdapter.getDFSClient((DistributedFileSystem)fs);
     LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
     LocatedBlock b = blocks.getLastLocatedBlock();
     dfsclient.getNamenode().abandonBlock(b.getBlock(), src, dfsclient.clientName);

+ 20 - 23
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -43,7 +43,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.FsShell;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.shell.Count;
 import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset;
@@ -701,10 +700,10 @@ public class TestDFSShell extends TestCase {
       String root = createTree(dfs, "count");
 
       // Verify the counts
-      runCount(root, 2, 4, conf);
-      runCount(root + "2", 2, 1, conf);
-      runCount(root + "2/f1", 0, 1, conf);
-      runCount(root + "2/sub", 1, 0, conf);
+      runCount(root, 2, 4, shell);
+      runCount(root + "2", 2, 1, shell);
+      runCount(root + "2/f1", 0, 1, shell);
+      runCount(root + "2/sub", 1, 0, shell);
 
       final FileSystem localfs = FileSystem.getLocal(conf);
       Path localpath = new Path(TEST_ROOT_DIR, "testcount");
@@ -714,8 +713,8 @@ public class TestDFSShell extends TestCase {
       
       final String localstr = localpath.toString();
       System.out.println("localstr=" + localstr);
-      runCount(localstr, 1, 0, conf);
-      assertEquals(0, new Count(new String[]{root, localstr}, 0, conf).runAll());
+      runCount(localstr, 1, 0, shell);
+      assertEquals(0, runCmd(shell, "-count", root, localstr));
     } finally {
       try {
         dfs.close();
@@ -724,7 +723,7 @@ public class TestDFSShell extends TestCase {
       cluster.shutdown();
     }
   }
-  private void runCount(String path, long dirs, long files, Configuration conf
+  private static void runCount(String path, long dirs, long files, FsShell shell
     ) throws IOException {
     ByteArrayOutputStream bytes = new ByteArrayOutputStream(); 
     PrintStream out = new PrintStream(bytes);
@@ -733,7 +732,7 @@ public class TestDFSShell extends TestCase {
     Scanner in = null;
     String results = null;
     try {
-      new Count(new String[]{path}, 0, conf).runAll();
+      runCmd(shell, "-count", path);
       results = bytes.toString();
       in = new Scanner(results);
       assertEquals(dirs, in.nextLong());
@@ -747,7 +746,7 @@ public class TestDFSShell extends TestCase {
   }
 
   //throws IOException instead of Exception as shell.run() does.
-  private int runCmd(FsShell shell, String... args) throws IOException {
+  private static int runCmd(FsShell shell, String... args) throws IOException {
     StringBuilder cmdline = new StringBuilder("RUN:");
     for (String arg : args) cmdline.append(" " + arg);
     LOG.info(cmdline.toString());
@@ -1362,48 +1361,46 @@ public class TestDFSShell extends TestCase {
         .format(true).build();
     FsShell shell = null;
     FileSystem fs = null;
-    File localFile = new File("testFileForPut");
-    Path hdfsTestDir = new Path("ForceTestDir");
+    final File localFile = new File(TEST_ROOT_DIR, "testFileForPut");
+    final String localfilepath = localFile.getAbsolutePath();
+    final String testdir = TEST_ROOT_DIR + "/ForceTestDir";
+    final Path hdfsTestDir = new Path(testdir);
     try {
       fs = cluster.getFileSystem();
       fs.mkdirs(hdfsTestDir);
       localFile.createNewFile();
-      writeFile(fs, new Path("testFileForPut"));
+      writeFile(fs, new Path(TEST_ROOT_DIR, "testFileForPut"));
       shell = new FsShell();
 
       // Tests for put
-      String[] argv = new String[] { "-put", "-f", localFile.getName(),
-          "ForceTestDir" };
+      String[] argv = new String[] { "-put", "-f", localfilepath, testdir };
       int res = ToolRunner.run(shell, argv);
       int SUCCESS = 0;
       int ERROR = 1;
       assertEquals("put -f is not working", SUCCESS, res);
 
-      argv = new String[] { "-put", localFile.getName(), "ForceTestDir" };
+      argv = new String[] { "-put", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
       assertEquals("put command itself is able to overwrite the file", ERROR,
          res);
 
      // Tests for copyFromLocal
-      argv = new String[] { "-copyFromLocal", "-f", localFile.getName(),
-          "ForceTestDir" };
+      argv = new String[] { "-copyFromLocal", "-f", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
       assertEquals("copyFromLocal -f is not working", SUCCESS, res);
 
-      argv = new String[] { "-copyFromLocal", localFile.getName(),
-          "ForceTestDir" };
+      argv = new String[] { "-copyFromLocal", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
       assertEquals(
           "copyFromLocal command itself is able to overwrite the file", ERROR,
          res);
 
      // Tests for cp
-      argv = new String[] { "-cp", "-f", localFile.getName(), "ForceTestDir" };
+      argv = new String[] { "-cp", "-f", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
       assertEquals("cp -f is not working", SUCCESS, res);
 
-      argv = new String[] { "-cp", localFile.getName(),
-          "ForceTestDir" };
+      argv = new String[] { "-cp", localfilepath, testdir };
       res = ToolRunner.run(shell, argv);
       assertEquals("cp command itself is able to overwrite the file", ERROR,
          res);

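Aside (sketch only): the test now drives the generic shell instead of instantiating the removed Count command; a minimal version of that pattern, using only FsShell and ToolRunner as in the hunks above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FsShell;
    import org.apache.hadoop.util.ToolRunner;

    class ShellCountSketch {
      // Run "fs -count <path>" programmatically and return the shell's exit code.
      static int runCount(Configuration conf, String path) throws Exception {
        FsShell shell = new FsShell(conf);
        return ToolRunner.run(shell, new String[] { "-count", path });
      }
    }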
+ 1 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -80,7 +80,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, ORG_FILE_SIZE, REPLICATION_NUM, 0L);
-      assertTrue(dfs.dfs.exists(filestr));
+      assertTrue(dfs.exists(filepath));
       DFSTestUtil.waitReplication(dfs, filepath, REPLICATION_NUM);
 
       //get block info for the last block

+ 3 - 9
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java

@@ -74,7 +74,7 @@ public class TestReplication extends TestCase {
   private void checkFile(FileSystem fileSys, Path name, int repl)
     throws IOException {
     Configuration conf = fileSys.getConf();
-    ClientProtocol namenode = DFSClient.createNamenode(conf);
+    ClientProtocol namenode = DFSUtil.createNamenode(conf);
       
     waitForBlockReplication(name.toString(), namenode, 
                             Math.min(numDatanodes, repl), -1);
@@ -255,7 +255,6 @@ public class TestReplication extends TestCase {
     
     //wait for all the blocks to be replicated;
     LOG.info("Checking for block replication for " + filename);
-    int iters = 0;
     while (true) {
       boolean replOk = true;
       LocatedBlocks blocks = namenode.getBlockLocations(filename, 0, 
@@ -266,11 +265,8 @@ public class TestReplication extends TestCase {
         LocatedBlock block = iter.next();
         int actual = block.getLocations().length;
         if ( actual < expected ) {
-          if (true || iters > 0) {
-            LOG.info("Not enough replicas for " + block.getBlock() +
-                               " yet. Expecting " + expected + ", got " + 
-                               actual + ".");
-          }
+          LOG.info("Not enough replicas for " + block.getBlock()
+              + " yet. Expecting " + expected + ", got " + actual + ".");
           replOk = false;
           break;
         }
@@ -280,8 +276,6 @@ public class TestReplication extends TestCase {
         return;
       }
       
-      iters++;
-      
       if (maxWaitSec > 0 && 
           (System.currentTimeMillis() - start) > (maxWaitSec * 1000)) {
         throw new IOException("Timedout while waiting for all blocks to " +

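For readers skimming the loop above, a condensed, hypothetical form of one polling pass: re-fetch the block locations and report false as soon as any block is short on replicas. getBlockLocations and getLocations are the calls already used in the hunk; getLocatedBlocks() is assumed to be the list accessor on LocatedBlocks.

    import java.io.IOException;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;
    import org.apache.hadoop.hdfs.protocol.LocatedBlock;
    import org.apache.hadoop.hdfs.protocol.LocatedBlocks;

    class ReplicationCheckSketch {
      // True only if every block of the file already has >= expected replicas.
      static boolean fullyReplicated(ClientProtocol namenode, String path,
          long fileLen, int expected) throws IOException {
        LocatedBlocks blocks = namenode.getBlockLocations(path, 0, fileLen);
        for (LocatedBlock b : blocks.getLocatedBlocks()) {
          if (b.getLocations().length < expected) {
            return false;
          }
        }
        return true;
      }
    }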
+ 22 - 10
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/TestWriteConfigurationToDFS.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.io.IOUtils;
+
 import java.io.OutputStream;
 import org.junit.Test;
 
@@ -35,15 +37,25 @@ public class TestWriteConfigurationToDFS {
     conf.setInt(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 4096);
     System.out.println("Setting conf in: " + System.identityHashCode(conf));
     MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
-    FileSystem fs = cluster.getFileSystem();
-    Path filePath = new Path("/testWriteConf.xml");
-    OutputStream os = fs.create(filePath);
-    StringBuilder longString = new StringBuilder();
-    for (int i = 0; i < 100000; i++) {
-      longString.append("hello");
-    } // 500KB
-    conf.set("foobar", longString.toString());
-    conf.writeXml(os);
-    os.close();
+    FileSystem fs = null;
+    OutputStream os = null;
+    try {
+      fs = cluster.getFileSystem();
+      Path filePath = new Path("/testWriteConf.xml");
+      os = fs.create(filePath);
+      StringBuilder longString = new StringBuilder();
+      for (int i = 0; i < 100000; i++) {
+        longString.append("hello");
+      } // 500KB
+      conf.set("foobar", longString.toString());
+      conf.writeXml(os);
+      os.close();
+      os = null;
+      fs.close();
+      fs = null;
+    } finally {
+      IOUtils.cleanup(null, os, fs);
+      cluster.shutdown();
+    }
   }
 }

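Aside (illustrative sketch): the cleanup idiom the test adopts; handles are nulled once they close normally, so IOUtils.cleanup in the finally block only touches whatever is still open.

    import java.io.IOException;
    import java.io.OutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    class CleanupIdiomSketch {
      static void writeAndClose(FileSystem fs, Path path, byte[] data) throws IOException {
        OutputStream os = null;
        try {
          os = fs.create(path);
          os.write(data);
          os.close();
          os = null;                   // closed cleanly; nothing left for the finally block
        } finally {
          IOUtils.cleanup(null, os);   // no-op when os is already null
        }
      }
    }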
+ 16 - 16
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/security/token/block/TestBlockToken.java

@@ -18,6 +18,19 @@
 
 package org.apache.hadoop.hdfs.security.token.block;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+import static org.mockito.Matchers.any;
+import static org.mockito.Matchers.anyInt;
+import static org.mockito.Matchers.anyLong;
+import static org.mockito.Matchers.anyString;
+import static org.mockito.Mockito.doAnswer;
+import static org.mockito.Mockito.doReturn;
+import static org.mockito.Mockito.mock;
+import static org.mockito.Mockito.when;
+
 import java.io.ByteArrayInputStream;
 import java.io.DataInputStream;
 import java.io.File;
@@ -33,13 +46,12 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -58,21 +70,9 @@ import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.log4j.Level;
-
 import org.junit.Assert;
 import org.junit.Assume;
 import org.junit.Test;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-import static org.junit.Assert.*;
-import static org.mockito.Matchers.any;
-import static org.mockito.Matchers.anyInt;
-import static org.mockito.Matchers.anyLong;
-import static org.mockito.Matchers.anyString;
-import static org.mockito.Mockito.doAnswer;
-import static org.mockito.Mockito.doReturn;
-import static org.mockito.Mockito.mock;
-import static org.mockito.Mockito.when;
 import org.mockito.invocation.InvocationOnMock;
 import org.mockito.stubbing.Answer;
 
@@ -293,7 +293,7 @@ public class TestBlockToken {
     try {
       long endTime = System.currentTimeMillis() + 3000;
       while (System.currentTimeMillis() < endTime) {
-        proxy = DFSTestUtil.createClientDatanodeProtocolProxy(
+        proxy = DFSUtil.createClientDatanodeProtocolProxy(
            fakeDnId, conf, 1000, fakeBlock);
        assertEquals(block3.getBlockId(), proxy.getReplicaVisibleLength(block3));
        if (proxy != null) {

+ 5 - 5
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -32,9 +32,9 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.Block;
@@ -99,7 +99,7 @@ public class TestBalancer extends TestCase {
     cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     try {
       cluster.waitActive();
-      client = DFSClient.createNamenode(conf);
+      client = DFSUtil.createNamenode(conf);
 
       short replicationFactor = (short)(numNodes-1);
       long fileLen = size/replicationFactor;
@@ -193,7 +193,7 @@ public class TestBalancer extends TestCase {
                                               .simulatedCapacities(capacities)
                                               .build();
     cluster.waitActive();
-    client = DFSClient.createNamenode(conf);
+    client = DFSUtil.createNamenode(conf);
 
     for(int i = 0; i < blocksDN.length; i++)
       cluster.injectBlocks(i, Arrays.asList(blocksDN[i]));
@@ -305,7 +305,7 @@ public class TestBalancer extends TestCase {
                                 .build();
     try {
       cluster.waitActive();
-      client = DFSClient.createNamenode(conf);
+      client = DFSUtil.createNamenode(conf);
 
       long totalCapacity = sum(capacities);
       
@@ -396,7 +396,7 @@ public class TestBalancer extends TestCase {
                                 .build();
     try {
       cluster.waitActive();
-      client = DFSClient.createNamenode(conf);
+      client = DFSUtil.createNamenode(conf);
 
       long totalCapacity = sum(capacities);
 

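Note (sketch): throughout this section the tests obtain their ClientProtocol proxy from DFSUtil rather than the old DFSClient factory; in isolation the call looks like this.

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSUtil;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    class NamenodeProxySketch {
      // Returns an RPC proxy speaking ClientProtocol to the cluster's namenode.
      static ClientProtocol connect(Configuration conf) throws IOException {
        return DFSUtil.createNamenode(conf);
      }
    }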
+ 3 - 4
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestReplicationPolicy.java

@@ -32,7 +32,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.protocol.DatanodeID;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.net.NetworkTopology;
 import org.apache.hadoop.net.Node;
@@ -68,9 +67,9 @@ public class TestReplicationPolicy extends TestCase {
       e.printStackTrace();
       throw (RuntimeException)new RuntimeException().initCause(e);
     }
-    FSNamesystem fsNamesystem = namenode.getNamesystem();
-    replicator = fsNamesystem.blockManager.replicator;
-    cluster = fsNamesystem.clusterMap;
+    final BlockManager bm = namenode.getNamesystem().getBlockManager();
+    replicator = bm.replicator;
+    cluster = bm.getDatanodeManager().getNetworkTopology();
     // construct network topology
     for(int i=0; i<NUM_OF_DATANODES; i++) {
       cluster.add(dataNodes[i]);

+ 4 - 5
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/blockmanagement/TestUnderReplicatedBlocks.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
-import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
 
 public class TestUnderReplicatedBlocks extends TestCase {
   public void testSetrepIncWithUnderReplicatedBlocks() throws Exception {
@@ -44,11 +43,11 @@ public class TestUnderReplicatedBlocks extends TestCase {
       
       // remove one replica from the blocksMap so block becomes under-replicated
       // but the block does not get put into the under-replicated blocks queue
-      final FSNamesystem namesystem = cluster.getNamesystem();
+      final BlockManager bm = cluster.getNamesystem().getBlockManager();
       ExtendedBlock b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
-      DatanodeDescriptor dn = namesystem.blockManager.blocksMap.nodeIterator(b.getLocalBlock()).next();
-      namesystem.blockManager.addToInvalidates(b.getLocalBlock(), dn);
-      namesystem.blockManager.blocksMap.removeNode(b.getLocalBlock(), dn);
+      DatanodeDescriptor dn = bm.blocksMap.nodeIterator(b.getLocalBlock()).next();
+      bm.addToInvalidates(b.getLocalBlock(), dn);
+      bm.blocksMap.removeNode(b.getLocalBlock(), dn);
       
       // increment this file's replication factor
       FsShell shell = new FsShell(conf);

+ 5 - 3
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java

@@ -25,6 +25,7 @@ import java.util.List;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -84,10 +85,11 @@ public class TestInterDatanodeProtocol {
       String filestr = "/foo";
       Path filepath = new Path(filestr);
       DFSTestUtil.createFile(dfs, filepath, 1024L, (short)3, 0L);
-      assertTrue(dfs.getClient().exists(filestr));
+      assertTrue(dfs.exists(filepath));
 
       //get block info
-      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
+      LocatedBlock locatedblock = getLastLocatedBlock(
+          DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
       DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       assertTrue(datanodeinfo.length > 0);
 
@@ -236,7 +238,7 @@ public class TestInterDatanodeProtocol {
 
       //get block info
       final LocatedBlock locatedblock = getLastLocatedBlock(
-          dfs.getClient().getNamenode(), filestr);
+          DFSClientAdapter.getDFSClient(dfs).getNamenode(), filestr);
       final DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
       Assert.assertTrue(datanodeinfo.length > 0);
 

+ 2 - 2
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestTransferRbw.java

@@ -25,6 +25,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DFSTestUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
@@ -32,7 +33,6 @@ import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
-import org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtocol;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.BlockOpResponseProto;
 import org.apache.hadoop.hdfs.protocol.proto.DataTransferProtos.Status;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
@@ -124,7 +124,7 @@ public class TestTransferRbw {
         final ExtendedBlock b = new ExtendedBlock(bpid, oldrbw.getBlockId(), oldrbw.getBytesAcked(),
             oldrbw.getGenerationStamp());
         final BlockOpResponseProto s = DFSTestUtil.transferRbw(
-            b, fs.getClient(), oldnodeinfo, newnodeinfo);
+            b, DFSClientAdapter.getDFSClient(fs), oldnodeinfo, newnodeinfo);
         Assert.assertEquals(Status.SUCCESS, s.getStatus());
       }
 

+ 17 - 25
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/OfflineEditsViewerHelper.java

@@ -18,40 +18,34 @@
 
 package org.apache.hadoop.hdfs.server.namenode;
 
-import java.io.IOException;
-import java.util.Iterator;
 import java.io.File;
+import java.io.IOException;
 import java.security.PrivilegedExceptionAction;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
+import java.util.Iterator;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-
-import org.apache.hadoop.hdfs.DFSClient;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileContext;
+import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSClientAdapter;
-import org.apache.hadoop.hdfs.MiniDFSCluster;
-import org.apache.hadoop.hdfs.server.common.Util;
-import org.apache.hadoop.hdfs.server.namenode.FSImage;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage;
-import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
-import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DistributedFileSystem;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.protocol.FSConstants;
 import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
-import org.apache.hadoop.hdfs.DFSTestUtil;
-import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
+import org.apache.hadoop.hdfs.server.common.Util;
+import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
 import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileContext;
-import org.apache.hadoop.fs.Path;
-import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.security.token.Token;
 
 /**
  * OfflineEditsViewerHelper is a helper class for TestOfflineEditsViewer,
@@ -207,8 +201,6 @@ public class OfflineEditsViewerHelper {
     // fake the user to renew token for
     UserGroupInformation longUgi = UserGroupInformation.createRemoteUser(
       "JobTracker/foo.com@FOO.COM");
-    UserGroupInformation shortUgi = UserGroupInformation.createRemoteUser(
-      "JobTracker");
     try {
       longUgi.doAs(new PrivilegedExceptionAction<Object>() {
         public Object run() throws IOException {
@@ -232,7 +224,7 @@ public class OfflineEditsViewerHelper {
     // OP_REASSIGN_LEASE 22
     String filePath = "/hard-lease-recovery-test";
     byte[] bytes = "foo-bar-baz".getBytes();
-    DFSClientAdapter.stopLeaseRenewer(dfs.getClient());
+    DFSClientAdapter.stopLeaseRenewer(dfs);
     FSDataOutputStream leaseRecoveryPath = dfs.create(new Path(filePath));
     leaseRecoveryPath.write(bytes);
     leaseRecoveryPath.hflush();

+ 2 - 1
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockUnderConstruction.java

@@ -27,6 +27,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.BlockLocation;
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSClientAdapter;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -72,7 +73,7 @@ public class TestBlockUnderConstruction {
     // wait until the block is allocated by DataStreamer
     BlockLocation[] locatedBlocks;
     while(blocksAfter <= blocksBefore) {
-      locatedBlocks = hdfs.getClient().getBlockLocations(
+      locatedBlocks = DFSClientAdapter.getDFSClient(hdfs).getBlockLocations(
           file.toString(), 0L, BLOCK_SIZE*NUM_BLOCKS);
       blocksAfter = locatedBlocks == null ? 0 : locatedBlocks.length;
     }

+ 2 - 2
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestEditLogRace.java

@@ -353,7 +353,7 @@ public class TestEditLogRace {
   @Test
   public void testSaveImageWhileSyncInProgress() throws Exception {
     Configuration conf = getConf();
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
 
@@ -451,7 +451,7 @@ public class TestEditLogRace {
   @Test
   public void testSaveRightBeforeSync() throws Exception {
     Configuration conf = getConf();
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     final FSNamesystem namesystem = new FSNamesystem(conf);
 

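Aside (sketch, with one assumption): the recurring test preamble in this and the following files registers metrics under NamenodeRole.NAMENODE before constructing the namesystem directly; the NamenodeRole import path below is an assumption on my part (the enum is taken to live in HdfsConstants under server.common).

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSTestUtil;
    // Assumed location of the NamenodeRole enum:
    import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
    import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
    import org.apache.hadoop.hdfs.server.namenode.NameNode;

    class NamesystemSetupSketch {
      // Metrics first, then format the name directories, then build the namesystem.
      static FSNamesystem freshNamesystem(Configuration conf) throws IOException {
        NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
        DFSTestUtil.formatNameNode(conf);
        return new FSNamesystem(conf);
      }
    }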
+ 5 - 5
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestSaveNamespace.java

@@ -105,7 +105,7 @@ public class TestSaveNamespace {
 
   private void saveNamespaceWithInjectedFault(Fault fault) throws Exception {
     Configuration conf = getConf();
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
@@ -208,7 +208,7 @@ public class TestSaveNamespace {
     Configuration conf = getConf();
     conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_RESTORE_KEY, true);
 
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
@@ -343,7 +343,7 @@ public class TestSaveNamespace {
   public void doTestFailedSaveNamespace(boolean restoreStorageAfterFailure)
   throws Exception {
     Configuration conf = getConf();
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
@@ -405,7 +405,7 @@ public class TestSaveNamespace {
   @Test
   public void testSaveWhileEditsRolled() throws Exception {
     Configuration conf = getConf();
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 
@@ -441,7 +441,7 @@ public class TestSaveNamespace {
   @Test
   public void testTxIdPersistence() throws Exception {
     Configuration conf = getConf();
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
     DFSTestUtil.formatNameNode(conf);
     FSNamesystem fsn = new FSNamesystem(conf);
 

+ 1 - 1
hdfs/src/test/unit/org/apache/hadoop/hdfs/server/namenode/TestNNLeaseRecovery.java

@@ -79,7 +79,7 @@ public class TestNNLeaseRecovery {
     conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, NAME_DIR);
     // avoid stubbing access control
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, false); 
-    NameNode.initMetrics(conf, NamenodeRole.ACTIVE);
+    NameNode.initMetrics(conf, NamenodeRole.NAMENODE);
 
     FileSystem.setDefaultUri(conf, "hdfs://localhost:0");
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "0.0.0.0:0");