Browse code

HADOOP-354. Make public methods to stop DFS daemons. Contributed by Barry Kaplan.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@421837 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 years ago
parent
commit
a60044aacd

+ 3 - 0
CHANGES.txt

@@ -34,6 +34,9 @@ Trunk (unreleased changes)
  9. HADOOP-361.  Remove unix dependencies from streaming contrib
     module tests, making them pure java. (Michel Tourn via cutting)
 
+10. HADOOP-354.  Make public methods to stop DFS daemons.
+   (Barry Kaplan via cutting)
+
 
 Release 0.4.0 - 2006-06-28
 

+ 18 - 5
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -82,7 +82,7 @@ public class DataNode implements FSConstants, Runnable {
         return new InetSocketAddress(host, port);
     }
 
-    private static Vector subThreadList = null;
+    private static Map subDataNodeList = null;
     DatanodeProtocol namenode;
     FSDataset data;
     DatanodeRegistration dnRegistration;
@@ -193,7 +193,7 @@ public class DataNode implements FSConstants, Runnable {
      * Shut down this instance of the datanode.
      * Returns only after shutdown is complete.
      */
-    void shutdown() {
+    public void shutdown() {
         this.shouldRun = false;
         ((DataXceiveServer) this.dataXceiveServer.getRunnable()).kill();
         try {
@@ -202,6 +202,19 @@ public class DataNode implements FSConstants, Runnable {
         }
     }
 
+    /**
+     * Shut down all datanodes that were started via the run(conf) method.
+     * Returns only after shutdown is complete.
+     */
+    public static void shutdownAll(){
+      if(subDataNodeList != null && !subDataNodeList.isEmpty()){
+        for (Iterator iterator = subDataNodeList.keySet().iterator(); iterator.hasNext();) {
+          DataNode dataNode = (DataNode) iterator.next();
+          dataNode.shutdown();
+        }
+      }
+    }
+
     void handleDiskError( String errMsgr ) {
         LOG.warn( "DataNode is shutting down.\n" + errMsgr );
         try {
@@ -880,14 +893,14 @@ public class DataNode implements FSConstants, Runnable {
      */
     public static void run(Configuration conf) throws IOException {
         String[] dataDirs = conf.getStrings("dfs.data.dir");
-        subThreadList = new Vector(dataDirs.length);
+        subDataNodeList = new HashMap(dataDirs.length);
         for (int i = 0; i < dataDirs.length; i++) {
           DataNode dn = makeInstanceForDir(dataDirs[i], conf);
           if (dn != null) {
             Thread t = new Thread(dn, "DataNode: "+dataDirs[i]);
             t.setDaemon(true); // needed for JUnit testing
             t.start();
-            subThreadList.add(t);
+            subDataNodeList.put(dn,t);
           }
         }
     }
@@ -901,7 +914,7 @@ public class DataNode implements FSConstants, Runnable {
     run(conf);
 
     //  Wait for sub threads to exit
-    for (Iterator iterator = subThreadList.iterator(); iterator.hasNext();) {
+    for (Iterator iterator = subDataNodeList.entrySet().iterator(); iterator.hasNext();) {
       Thread threadDataNode = (Thread) iterator.next();
       try {
         threadDataNode.join();

+ 1 - 2
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -114,9 +114,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
 
     /**
      * Stop all NameNode threads and wait for all to finish.
-     * Package-only access since this is intended for JUnit testing.
     */
-    void stop() {
+    public void stop() {
       if (! stopRequested) {
         stopRequested = true;
         namesystem.close();

+ 8 - 0
src/java/org/apache/hadoop/ipc/RPC.java

@@ -135,6 +135,14 @@ public class RPC {
     return CLIENT;
   }
 
+  /**
+   * Stop all RPC client connections
+   */
+  public static synchronized void stopClient(){
+    if(CLIENT != null)
+      CLIENT.stop();
+  }
+
   private static class Invoker implements InvocationHandler {
     private InetSocketAddress address;
     private Client client;