
HDFS-1791. Federation: Add command to delete block pool directories from a datanode. Contributed by jitendra.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1086788 13f79535-47bb-0310-9956-ffa450edef68
Jitendra Nath Pandey, 14 years ago
commit 80b37a0606

+ 3 - 0
CHANGES.txt

@@ -248,6 +248,9 @@ Trunk (unreleased changes)
 
     HDFS-1675. Support transferring RBW between datanodes. (szetszwo)
 
+    HDFS-1791. Federation: Add command to delete block pool directories 
+    from a datanode. (jitendra)
+
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)

+ 14 - 2
src/java/org/apache/hadoop/hdfs/protocol/ClientDatanodeProtocol.java

@@ -40,9 +40,9 @@ public interface ClientDatanodeProtocol extends VersionedProtocol {
   public static final Log LOG = LogFactory.getLog(ClientDatanodeProtocol.class);
 
   /**
-   * 8: Add refreshNamenodes method
+   * 9: Added deleteBlockPool method
    */
-  public static final long versionID = 8L;
+  public static final long versionID = 9L;
 
   /** Return the visible length of a replica. */
   long getReplicaVisibleLength(ExtendedBlock b) throws IOException;
@@ -54,4 +54,16 @@ public interface ClientDatanodeProtocol extends VersionedProtocol {
    * @throws IOException on error
    **/
   void refreshNamenodes() throws IOException;
+
+  /**
+   * Delete the block pool directory. If force is false, the directory is
+   * deleted only if it contains no block files; if force is true, it is
+   * deleted along with its contents.
+   *
+   * @param bpid Block pool id of the directory to be deleted.
+   * @param force If false, the directory is deleted only if it contains no
+   *          block files; if true, it is deleted along with its contents.
+   * @throws IOException if the directory cannot be deleted
+   */
+  void deleteBlockPool(String bpid, boolean force) throws IOException;
 }
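
For context, a client reaches this method over RPC. A minimal sketch, assuming the same proxy setup that DFSAdmin.getDataNodeProxy uses later in this commit (the datanode address and block pool id below are placeholders, and the usual Hadoop RPC/NetUtils/UserGroupInformation imports are assumed):

    InetSocketAddress dnAddr = new InetSocketAddress("datanodehost", 50020);
    ClientDatanodeProtocol dn = (ClientDatanodeProtocol) RPC.getProxy(
        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
        dnAddr, UserGroupInformation.getCurrentUser(), conf,
        NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
    // Throws IOException if the datanode is still serving the block pool.
    dn.deleteBlockPool("BP-example-id", false);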

+ 20 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -91,6 +91,7 @@ import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.Util;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
+import org.apache.hadoop.hdfs.server.datanode.FSDataset.FSVolume;
 import org.apache.hadoop.hdfs.server.datanode.FSDataset.VolumeInfo;
 import org.apache.hadoop.hdfs.server.datanode.SecureDataNodeStarter.SecureResources;
 import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
@@ -997,6 +998,9 @@ public class DataNode extends Configured
       if (blockScanner != null) {
         blockScanner.removeBlockPool(this.getBlockPoolId());
       }
+     
+      data.shutdownBlockPool(this.getBlockPoolId());
+      storage.removeBlockPoolStorage(this.getBlockPoolId());
     }
 
     /**
@@ -2650,6 +2654,22 @@ public class DataNode extends Configured
     conf = new Configuration();
     refreshNamenodes(conf);
   }
+  
+  @Override // ClientDatanodeProtocol
+  public void deleteBlockPool(String blockPoolId, boolean force)
+      throws IOException {
+    LOG.info("deleteBlockPool command received for block pool " + blockPoolId
+        + ", force=" + force);
+    if (blockPoolManager.get(blockPoolId) != null) {
+      LOG.warn("The block pool " + blockPoolId
+          + " is still running and cannot be deleted.");
+      throw new IOException(
+          "The block pool is still running. First do a refreshNamenodes to " +
+          "shut down the block pool service");
+    }
+   
+    data.deleteBlockPool(blockPoolId, force);
+  }
 
   /**
    * @param addr rpc address of the namenode
@@ -2686,7 +2706,6 @@ public class DataNode extends Configured
   
   /** Methods used by fault injection tests */
   public DatanodeID getDatanodeId() {
-    LOG.info("SURESH machienname " + getMachineName());
     return new DatanodeID(getMachineName(), getStorageId(),
         infoServer.getPort(), getIpcPort());
   }

+ 5 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/DataStorage.java

@@ -68,6 +68,7 @@ public class DataStorage extends Storage {
   final static String STORAGE_DIR_DETACHED = "detach";
   public final static String STORAGE_DIR_RBW = "rbw";
   public final static String STORAGE_DIR_FINALIZED = "finalized";
+  public final static String STORAGE_DIR_TMP = "tmp";
 
   private static final Pattern PRE_GENSTAMP_META_FILE_PATTERN = 
     Pattern.compile("(.*blk_[-]*\\d+)\\.meta$");
@@ -755,4 +756,8 @@ public class DataStorage extends Storage {
       this.bpStorageMap.put(bpID, bpStorage);
     }
   }
+
+  synchronized void removeBlockPoolStorage(String bpId) {
+    bpStorageMap.remove(bpId);
+  }
 }

+ 99 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/FSDataset.java

@@ -66,6 +66,7 @@ import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.util.DiskChecker.DiskErrorException;
 import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
 import org.apache.hadoop.hdfs.server.common.GenerationStamp;
+import org.apache.hadoop.hdfs.server.common.Storage;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.ReplicaState;
 import org.apache.hadoop.io.IOUtils;
 
@@ -323,7 +324,7 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
       // in the future, we might want to do some sort of datanode-local
       // recovery for these blocks. For example, crc validation.
       //
-      this.tmpDir = new File(bpDir, "tmp");
+      this.tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
       if (tmpDir.exists()) {
         FileUtil.fullyDelete(tmpDir);
       }
@@ -705,6 +706,73 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
       BlockPoolSlice bp = new BlockPoolSlice(bpid, this, bpdir, conf);
       map.put(bpid, bp);
     }
+    
+    public void shutdownBlockPool(String bpid) {
+      BlockPoolSlice bp = map.get(bpid);
+      if (bp != null) {
+        bp.shutdown();
+      }
+      map.remove(bpid);
+    }
+
+    private boolean isBPDirEmpty(String bpid)
+        throws IOException {
+      File volumeCurrentDir = this.getCurrentDir();
+      File bpDir = new File(volumeCurrentDir, bpid);
+      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
+      File finalizedDir = new File(bpCurrentDir,
+          DataStorage.STORAGE_DIR_FINALIZED);
+      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
+      if (finalizedDir.exists() && finalizedDir.list().length != 0) {
+        return false;
+      }
+      if (rbwDir.exists() && rbwDir.list().length != 0) {
+        return false;
+      }
+      return true;
+    }
+    
+    private void deleteBPDirectories(String bpid, boolean force)
+        throws IOException {
+      File volumeCurrentDir = this.getCurrentDir();
+      File bpDir = new File(volumeCurrentDir, bpid);
+      if (!bpDir.isDirectory()) {
+        // nothing to be deleted
+        return;
+      }
+      File tmpDir = new File(bpDir, DataStorage.STORAGE_DIR_TMP);
+      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
+      File finalizedDir = new File(bpCurrentDir,
+          DataStorage.STORAGE_DIR_FINALIZED);
+      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
+      if (force) {
+        FileUtil.fullyDelete(bpDir);
+      } else {
+        if (!rbwDir.delete()) {
+          throw new IOException("Failed to delete " + rbwDir);
+        }
+        if (!finalizedDir.delete()) {
+          throw new IOException("Failed to delete " + finalizedDir);
+        }
+        FileUtil.fullyDelete(tmpDir);
+        for (File f : bpCurrentDir.listFiles()) {
+          if (!f.delete()) {
+            throw new IOException("Failed to delete " + f);
+          }
+        }
+        if (!bpCurrentDir.delete()) {
+          throw new IOException("Failed to delete " + bpCurrentDir);
+        }
+        for (File f : bpDir.listFiles()) {
+          if (!f.delete()) {
+            throw new IOException("Failed to delete " + f);
+          }
+        }
+        if (!bpDir.delete()) {
+          throw new IOException("Failed to delete " + bpDir);
+        }
+      }
+    }
   }
     
   static class FSVolumeSet {
@@ -867,6 +935,12 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
       }
     }
     
+    private void removeBlockPool(String bpid) {
+      for (FSVolume v : volumes) {
+        v.shutdownBlockPool(bpid);
+      }
+    }
+    
     /**
      * @return unmodifiable list of volumes
      */
@@ -2462,6 +2536,12 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
     volumes.getVolumeMap(bpid, volumeMap);
   }
   
+  public synchronized void shutdownBlockPool(String bpid) {
+    DataNode.LOG.info("Removing block pool " + bpid);
+    volumeMap.cleanUpBlockPool(bpid);
+    volumes.removeBlockPool(bpid);
+  }
+  
   /**
    * get list of all bpids
    * @return list of bpids
@@ -2506,4 +2586,22 @@ public class FSDataset implements FSConstants, FSDatasetInterface {
     }
     return info;
   }
+  
+  @Override //FSDatasetInterface
+  public synchronized void deleteBlockPool(String bpid, boolean force)
+      throws IOException {
+    if (!force) {
+      for (FSVolume volume : volumes.volumes) {
+        if (!volume.isBPDirEmpty(bpid)) {
+          DataNode.LOG.warn(bpid
+              + " has some block files, cannot delete unless forced");
+          throw new IOException("Cannot delete block pool, "
+              + "it contains some block files");
+        }
+      }
+    }
+    for (FSVolume volume : volumes.volumes) {
+      volume.deleteBPDirectories(bpid, force);
+    }
+  }
 }
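
For reference, a sketch of the on-disk layout that isBPDirEmpty and deleteBPDirectories walk, assembled from the DataStorage constants used above (names in angle brackets are placeholders):

    <volume>/current/<bpid>/
        tmp/             STORAGE_DIR_TMP: fully deleted in both modes
        current/         STORAGE_DIR_CURRENT
            finalized/   STORAGE_DIR_FINALIZED: must hold no files unless force
            rbw/         STORAGE_DIR_RBW: must hold no files unless force
            VERSION, ... remaining files deleted one by one in the non-force path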

+ 19 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/FSDatasetInterface.java

@@ -369,4 +369,23 @@ public interface FSDatasetInterface extends FSDatasetMBean {
    * @param conf Configuration
    */
   public void addBlockPool(String bpid, Configuration conf) throws IOException;
+  
+  /**
+   * Shut down and remove the block pool from the underlying storage.
+   * @param bpid Block pool id to be removed
+   */
+  public void shutdownBlockPool(String bpid);
+  
+  /**
+   * Deletes the block pool directories. If force is false, the directories
+   * are deleted only if no block files exist for the block pool; if force
+   * is true, the entire directory for the block pool is deleted along with
+   * its contents.
+   * @param bpid Block pool id of the directories to be deleted.
+   * @param force If false, directories are deleted only if no block files
+   *        exist for the block pool; if true, the entire directory is
+   *        deleted along with its contents.
+   * @throws IOException if the directories cannot be deleted
+   */
+  public void deleteBlockPool(String bpid, boolean force) throws IOException;
 }

+ 7 - 0
src/java/org/apache/hadoop/hdfs/server/datanode/ReplicasMap.java

@@ -201,6 +201,13 @@ class ReplicasMap {
     }
   }
   
+  void cleanUpBlockPool(String bpid) {
+    checkBlockPool(bpid);
+    synchronized(mutex) {
+      map.remove(bpid);
+    }
+  }
+  
   /**
    * Give access to mutex used for synchronizing ReplicasMap
    * @return object used as lock

+ 56 - 22
src/java/org/apache/hadoop/hdfs/tools/DFSAdmin.java

@@ -36,6 +36,7 @@ import org.apache.hadoop.fs.shell.Command;
 import org.apache.hadoop.fs.shell.CommandFormat;
 import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
@@ -482,6 +483,7 @@ public class DFSAdmin extends FsShell {
       "\t[refreshSuperUserGroupsConfiguration]\n" +
       "\t[-printTopology]\n" +
       "\t[-refreshNamenodes datanodehost:port]\n"+
+      "\t[-deleteBlockPool datanodehost:port blockpoolId [force]]\n"+
       "\t[-help [cmd]]\n";
 
     String report ="-report: \tReports basic filesystem information and statistics.\n";
@@ -551,6 +553,15 @@ public class DFSAdmin extends FsShell {
                               "\t\tstops serving the removed block-pools\n"+
                               "\t\tand starts serving new block-pools\n";
     
+    String deleteBlockPool = "-deleteBlockPool: Arguments are datanodehost:port, blockpool id\n"+
+                             "\t\t and an optional argument \"force\". If force is passed,\n"+
+                             "\t\t the block pool directory for the given blockpool id on the\n"+
+                             "\t\t given datanode is deleted along with its contents, otherwise\n"+
+                             "\t\t the directory is deleted only if it is empty. The command\n" +
+                             "\t\t will fail if the datanode is still serving the block pool.\n" +
+                             "\t\t   Refer to refreshNamenodes to shut down a block pool\n" +
+                             "\t\t service on a datanode.\n";
+    
     String help = "-help [cmd]: \tDisplays help for the given command or all commands if none\n" +
       "\t\tis specified.\n";
 
@@ -588,6 +599,8 @@ public class DFSAdmin extends FsShell {
       System.out.println(printTopology);
     } else if ("refreshNamenodes".equals(cmd)) {
       System.out.println(refreshNamenodes);
+    } else if ("deleteBlockPool".equals(cmd)) {
+      System.out.println(deleteBlockPool);
     } else if ("help".equals(cmd)) {
       System.out.println(help);
     } else {
@@ -609,6 +622,7 @@ public class DFSAdmin extends FsShell {
       System.out.println(refreshSuperUserGroupsConfiguration);
       System.out.println(printTopology);
       System.out.println(refreshNamenodes);
+      System.out.println(deleteBlockPool);
       System.out.println(help);
       System.out.println();
       ToolRunner.printGenericCommandUsage(System.out);
@@ -891,6 +905,9 @@ public class DFSAdmin extends FsShell {
     } else if ("-refreshNamenodes".equals(cmd)) {
       System.err.println("Usage: java DFSAdmin"
                          + " [-refreshNamenodes datanode-host:port]");
+    } else if ("-deleteBlockPool".equals(cmd)) {
+      System.err.println("Usage: java DFSAdmin"
+          + " [-deleteBlockPool datanode-host:port blockpoolId [force]]");
     } else {
       System.err.println("Usage: java DFSAdmin");
       System.err.println("           [-report]");
@@ -906,6 +923,7 @@ public class DFSAdmin extends FsShell {
       System.err.println("           [-refreshSuperUserGroupsConfiguration]");
       System.err.println("           [-printTopology]");
       System.err.println("           [-refreshNamenodes datanodehost:port]");
+      System.err.println("           [-deleteBlockPool datanode-host:port blockpoolId [force]]");
       System.err.println("           ["+SetQuotaCommand.USAGE+"]");
       System.err.println("           ["+ClearQuotaCommand.USAGE+"]");
       System.err.println("           ["+SetSpaceQuotaCommand.USAGE+"]");
@@ -996,6 +1014,11 @@ public class DFSAdmin extends FsShell {
         printUsage(cmd);
         return exitCode;
       }
+    } else if ("-deleteBlockPool".equals(cmd)) {
+      if ((argv.length != 3) && (argv.length != 4)) {
+        printUsage(cmd);
+        return exitCode;
+      }
     }
     
     // initialize DFSAdmin
@@ -1046,6 +1069,8 @@ public class DFSAdmin extends FsShell {
         exitCode = printTopology();
       } else if ("-refreshNamenodes".equals(cmd)) {
         exitCode = refreshNamenodes(argv, i);
+      } else if ("-deleteBlockPool".equals(cmd)) {
+        exitCode = deleteBlockPool(argv, i);
       } else if ("-help".equals(cmd)) {
         if (i < argv.length) {
           printHelp(argv[i]);
@@ -1083,33 +1108,42 @@ public class DFSAdmin extends FsShell {
     return exitCode;
   }
 
-  private int refreshNamenodes(String[] argv, int i) throws IOException {
-    String datanode = argv[i];
-    
-    int colonIndex = datanode.indexOf(':');
-    String datanodeHostname = datanode.substring(0, colonIndex);
-    String portString = datanode.substring(colonIndex+1);
-    int port = Integer.valueOf(portString).intValue();
-    
-    InetSocketAddress datanodeAddr = new InetSocketAddress(datanodeHostname,
-        port);
-    
+  private ClientDatanodeProtocol getDataNodeProxy(String datanode)
+      throws IOException {
+    InetSocketAddress datanodeAddr = DFSUtil.getSocketAddress(datanode);
     // Get the current configuration
     Configuration conf = getConf();
-    
-    // for security authorization
-    // server principal for this call   
-    // should be NN's one.
-    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY, 
+
+    // For datanode proxy the server principal should be DN's one.
+    conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
         conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));
 
     // Create the client
-    ClientDatanodeProtocol refreshProtocol = (ClientDatanodeProtocol) RPC
-        .getProxy(ClientDatanodeProtocol.class,
-            ClientDatanodeProtocol.versionID, datanodeAddr, getUGI(), conf,
-            NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
-    
-    // Refresh the authorization policy in-effect
+    ClientDatanodeProtocol dnProtocol = (ClientDatanodeProtocol) RPC.getProxy(
+        ClientDatanodeProtocol.class, ClientDatanodeProtocol.versionID,
+        datanodeAddr, getUGI(), conf, NetUtils.getSocketFactory(conf,
+            ClientDatanodeProtocol.class));
+    return dnProtocol;
+  }
+  
+  private int deleteBlockPool(String[] argv, int i) throws IOException {
+    ClientDatanodeProtocol dnProxy = getDataNodeProxy(argv[i]);
+    boolean force = false;
+    if (argv.length-1 == i+2) {
+      if ("force".equals(argv[i+2])) {
+        force = true;
+      } else {
+        printUsage("-deleteBlockPool");
+        return -1;
+      }
+    }
+    dnProxy.deleteBlockPool(argv[i+1], force);
+    return 0;
+  }
+  
+  private int refreshNamenodes(String[] argv, int i) throws IOException {
+    String datanode = argv[i];
+    ClientDatanodeProtocol refreshProtocol = getDataNodeProxy(datanode);
     refreshProtocol.refreshNamenodes();
     
     return 0;
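
A short usage sketch of the resulting admin workflow (the datanode address and block pool id are placeholders; run throws Exception, so call it from a method that declares it). refreshNamenodes must run first, against a configuration that no longer lists the target nameservice, so the datanode stops serving the pool; otherwise the DataNode-side guard rejects the delete:

    Configuration conf = new Configuration(); // assumed to list only the remaining nameservices
    DFSAdmin admin = new DFSAdmin(conf);
    // Step 1: stop the block pool service on the datanode.
    admin.run(new String[] { "-refreshNamenodes", "datanodehost:50020" });
    // Step 2: delete the pool's directories; "force" also removes block files.
    int rc = admin.run(new String[] {
        "-deleteBlockPool", "datanodehost:50020", "BP-example-id", "force" });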

+ 15 - 0
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/SimulatedFSDataset.java

@@ -342,6 +342,10 @@ public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Con
       map.put(bpid, new SimulatedBPStorage());
     }
     
+    synchronized void removeBlockPool(String bpid) {
+      map.remove(bpid);
+    }
+    
     private SimulatedBPStorage getBPStorage(String bpid) throws IOException {
       SimulatedBPStorage bpStorage = map.get(bpid);
       if (bpStorage == null) {
@@ -942,6 +946,17 @@ public class SimulatedFSDataset  implements FSConstants, FSDatasetInterface, Con
     blockMap.put(bpid, map);
     storage.addBlockPool(bpid);
   }
+  
+  @Override // FSDatasetInterface
+  public void shutdownBlockPool(String bpid) {
+    blockMap.remove(bpid);
+    storage.removeBlockPool(bpid);
+  }
+  
+  @Override // FSDatasetInterface
+  public void deleteBlockPool(String bpid, boolean force) {
+    // No-op: the simulated dataset keeps no on-disk block pool directories.
+  }
 
   @Override
   public ReplicaInPipelineInterface convertTemporaryToRbw(ExtendedBlock temporary)

+ 229 - 0
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDeleteBlockPool.java

@@ -0,0 +1,229 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.datanode;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.apache.hadoop.hdfs.tools.DFSAdmin;
+import org.junit.Test;
+
+/**
+ * Tests deleteBlockPool functionality.
+ */
+public class TestDeleteBlockPool {
+  
+  @Test
+  public void testDeleteBlockPool() throws Exception {
+    // Start a cluster with 2 NNs and 2 DNs
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+          "namesServerId1,namesServerId2");
+      cluster = new MiniDFSCluster.Builder(conf).federation(true).numNameNodes(
+          2).numDataNodes(2).build();
+
+      cluster.waitActive();
+
+      FileSystem fs1 = cluster.getFileSystem(0);
+      FileSystem fs2 = cluster.getFileSystem(1);
+
+      DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 2, 54);
+      DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 2, 54);
+
+      DataNode dn1 = cluster.getDataNodes().get(0);
+      DataNode dn2 = cluster.getDataNodes().get(1);
+
+      String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
+      String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
+
+      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
+      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
+      File dn2StorageDir1 = MiniDFSCluster.getStorageDir(1, 0);
+      File dn2StorageDir2 = MiniDFSCluster.getStorageDir(1, 1);
+
+      // The block pool service for bpid1 is still running; delete must fail.
+      try {
+        dn1.deleteBlockPool(bpid1, true);
+        Assert.fail("Must not delete a running block pool");
+      } catch (IOException expected) {
+      }
+
+      Configuration nn1Conf = cluster.getConfiguration(1);
+      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId2");
+      dn1.refreshNamenodes(nn1Conf);
+      assertEquals(1, dn1.getAllBpOs().length);
+
+      try {
+        dn1.deleteBlockPool(bpid1, false);
+        Assert.fail("Must not delete if any block files exist unless "
+            + "force is true");
+      } catch (IOException expected) {
+      }
+
+      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
+      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+
+      dn1.deleteBlockPool(bpid1, true);
+
+      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid1);
+      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid1);
+     
+      fs1.delete(new Path("/alpha"), true);
+      
+      // Wait until all blocks for bpid1 are deleted from dn2.
+      while ((MiniDFSCluster.getFinalizedDir(dn2StorageDir1, 
+          bpid1).list().length != 0) || (MiniDFSCluster.getFinalizedDir(
+              dn2StorageDir2, bpid1).list().length != 0)) {
+        try {
+          Thread.sleep(3000);
+        } catch (Exception ignored) {
+        }
+      }
+      cluster.shutdownNameNode(0);
+      
+      // Although the namenode is shut down, the block pool service is
+      // still running on dn2.
+      try {
+        dn2.deleteBlockPool(bpid1, true);
+        Assert.fail("Must not delete a running block pool");
+      } catch (IOException expected) {
+      }
+      
+      dn2.refreshNamenodes(nn1Conf);
+      assertEquals(1, dn2.getAllBpOs().length);
+      
+      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid1);
+      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid1);
+      
+      // Now deleteBlockPool must succeed with force=false, because no
+      // blocks exist for bpid1 and its block pool service has stopped.
+      dn2.deleteBlockPool(bpid1, false);
+      
+      verifyBlockPoolDirectories(false, dn2StorageDir1, bpid1);
+      verifyBlockPoolDirectories(false, dn2StorageDir2, bpid1);
+      
+      // bpid2 must not be affected
+      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
+      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
+      verifyBlockPoolDirectories(true, dn2StorageDir1, bpid2);
+      verifyBlockPoolDirectories(true, dn2StorageDir2, bpid2);
+      // make sure the second block pool is still running fine
+      Path gammaFile = new Path("/gamma");
+      DFSTestUtil.createFile(fs2, gammaFile, 1024, (short) 1, 55);
+      fs2.setReplication(gammaFile, (short)2);
+      DFSTestUtil.waitReplication(fs2, gammaFile, (short) 2);
+      
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  @Test
+  public void testDfsAdminDeleteBlockPool() throws Exception {
+    Configuration conf = new Configuration();
+    MiniDFSCluster cluster = null;
+    try {
+      conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES,
+          "namesServerId1,namesServerId2");
+      cluster = new MiniDFSCluster.Builder(conf).federation(true).numNameNodes(
+          2).numDataNodes(1).build();
+
+      cluster.waitActive();
+
+      FileSystem fs1 = cluster.getFileSystem(0);
+      FileSystem fs2 = cluster.getFileSystem(1);
+
+      DFSTestUtil.createFile(fs1, new Path("/alpha"), 1024, (short) 1, 54);
+      DFSTestUtil.createFile(fs2, new Path("/beta"), 1024, (short) 1, 54);
+
+      DataNode dn1 = cluster.getDataNodes().get(0);
+
+      String bpid1 = cluster.getNamesystem(0).getBlockPoolId();
+      String bpid2 = cluster.getNamesystem(1).getBlockPoolId();
+      
+      File dn1StorageDir1 = MiniDFSCluster.getStorageDir(0, 0);
+      File dn1StorageDir2 = MiniDFSCluster.getStorageDir(0, 1);
+      
+      Configuration nn1Conf = cluster.getConfiguration(0);
+      nn1Conf.set(DFSConfigKeys.DFS_FEDERATION_NAMESERVICES, "namesServerId1");
+      dn1.refreshNamenodes(nn1Conf);
+      Assert.assertEquals(1, dn1.getAllBpOs().length);
+      
+      DFSAdmin admin = new DFSAdmin(nn1Conf);
+      String dn1Address = dn1.getSelfAddr().getHostName()+":"+dn1.getIpcPort();
+      String[] args = { "-deleteBlockPool", dn1Address, bpid2 };
+      
+      int ret = admin.run(args);
+      Assert.assertFalse(0 == ret);
+
+      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid2);
+      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid2);
+      
+      String[] forceArgs = { "-deleteBlockPool", dn1Address, bpid2, "force" };
+      ret = admin.run(forceArgs);
+      Assert.assertEquals(0, ret);
+      
+      verifyBlockPoolDirectories(false, dn1StorageDir1, bpid2);
+      verifyBlockPoolDirectories(false, dn1StorageDir2, bpid2);
+      
+      // bpid1 must remain intact
+      verifyBlockPoolDirectories(true, dn1StorageDir1, bpid1);
+      verifyBlockPoolDirectories(true, dn1StorageDir2, bpid1);
+      
+    } finally {
+      if (cluster != null) {
+        cluster.shutdown();
+      }
+    }
+  }
+  
+  private void verifyBlockPoolDirectories(boolean shouldExist,
+      File storageDir, String bpid) throws IOException {
+    File bpDir = new File(storageDir, DataStorage.STORAGE_DIR_CURRENT + "/"
+        + bpid);
+
+    if (!shouldExist) {
+      Assert.assertFalse(bpDir.exists());
+    } else {
+      File bpCurrentDir = new File(bpDir, DataStorage.STORAGE_DIR_CURRENT);
+      File finalizedDir = new File(bpCurrentDir,
+          DataStorage.STORAGE_DIR_FINALIZED);
+      File rbwDir = new File(bpCurrentDir, DataStorage.STORAGE_DIR_RBW);
+      File versionFile = new File(bpCurrentDir, "VERSION");
+
+      Assert.assertTrue(finalizedDir.isDirectory());
+      Assert.assertTrue(rbwDir.isDirectory());
+      Assert.assertTrue(versionFile.exists());
+    }
+  }
+}