
HDFS-1456. Provide builder for constructing instances of MiniDFSCluster.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1025788 13f79535-47bb-0310-9956-ffa450edef68
Jakob Homan, 14 years ago · commit 8c4dcaee7c
100 changed files with 506 additions and 239 deletions
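
The heart of this change is a new fluent Builder nested inside MiniDFSCluster (file 19 below); the other 99 files are mechanical conversions from the now-deprecated telescoping constructors. A minimal sketch of the conversion pattern, using values from one of the converted tests:

    // Before: deprecated telescoping constructor (conf, numDataNodes, format, racks)
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);

    // After: the equivalent Builder call; unset options keep their defaults
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                .numDataNodes(2)
                                                .build();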
  1. +3 −0    CHANGES.txt
  2. +1 −1    src/contrib/fuse-dfs/src/test/TestFuseDFS.java
  3. +1 −1    src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java
  4. +1 −1    src/test/aop/org/apache/hadoop/fs/TestFiListPath.java
  5. +1 −1    src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java
  6. +1 −1    src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java
  7. +1 −1    src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java
  8. +4 −1    src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java
  9. +1 −1    src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java
  10. +1 −1   src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java
  11. +1 −1   src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java
  12. +1 −1   src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java
  13. +3 −2   src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java
  14. +1 −1   src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java
  15. +1 −1   src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java
  16. +4 −4   src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java
  17. +2 −1   src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java
  18. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java
  19. +130 −0 src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java
  20. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java
  21. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java
  22. +6 −7   src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  23. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java
  24. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java
  25. +2 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java
  26. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java
  27. +4 −4   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java
  28. +6 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java
  29. +2 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java
  30. +3 −3   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java
  31. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java
  32. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java
  33. +36 −6  src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java
  34. +15 −15 src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java
  35. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java
  36. +6 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java
  37. +12 −3  src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java
  38. +24 −6  src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java
  39. +5 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java
  40. +2 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java
  41. +11 −7  src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  42. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeConfig.java
  43. +3 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java
  44. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeRegistration.java
  45. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java
  46. +2 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java
  47. +5 −5   src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java
  48. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java
  49. +3 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java
  50. +3 −3   src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java
  51. +4 −3   src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java
  52. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java
  53. +2 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java
  54. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java
  55. +2 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java
  56. +20 −17 src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java
  57. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java
  58. +7 −5   src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java
  59. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java
  60. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java
  61. +3 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java
  62. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java
  63. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java
  64. +3 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java
  65. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java
  66. +5 −4   src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java
  67. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestLargeBlock.java
  68. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java
  69. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java
  70. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java
  71. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java
  72. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java
  73. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java
  74. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java
  75. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java
  76. +2 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java
  77. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java
  78. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java
  79. +3 −3   src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java
  80. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java
  81. +28 −20 src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java
  82. +10 −7  src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java
  83. +3 −3   src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java
  84. +2 −2   src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java
  85. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java
  86. +9 −4   src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java
  87. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java
  88. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java
  89. +7 −1   src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java
  90. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java
  91. +1 −2   src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java
  92. +16 −7  src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java
  93. +23 −11 src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java
  94. +3 −2   src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java
  95. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java
  96. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java
  97. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java
  98. +1 −1   src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java
  99. +3 −3   src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java
  100. +1 −1  src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java

+ 3 - 0
CHANGES.txt

@@ -157,6 +157,9 @@ Trunk (unreleased changes)
 
     HDFS-1426. Remove unused method BlockInfo#listCount. (hairong)
 
+    HDFS-1456. Provide builder for constructing instances of MiniDFSCluster.
+    (jghoman)
+
   OPTIMIZATIONS
 
     HDFS-1140. Speedup INode.getPathComponents. (Dmytro Molkov via shv)

+ 1 - 1
src/contrib/fuse-dfs/src/test/TestFuseDFS.java

@@ -115,7 +115,7 @@ public class TestFuseDFS extends TestCase {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,false);
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       fileSys = (DistributedFileSystem)cluster.getFileSystem();
       assertTrue(fileSys.getFileStatus(new Path("/")).isDir());
       mount(mpoint, fileSys.getUri());
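
Note that the common four-argument form new MiniDFSCluster(conf, 1, true, null) collapses to new MiniDFSCluster.Builder(conf).build() with no explicit options: the Builder defaults (numDataNodes = 1, format = true, racks = null) match the dropped arguments exactly.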

+ 1 - 1
src/contrib/hdfsproxy/src/test/org/apache/hadoop/hdfsproxy/TestHdfsProxy.java

@@ -210,7 +210,7 @@ public class TestHdfsProxy extends TestCase {
       dfsConf.set("hadoop.proxyuser.users.ip-addresses", "localhost");
       dfsConf.set("hadoop.proxyuser." + System.getProperty("user.name") +
           ".ip-addresses", "localhost");
-      cluster = new MiniDFSCluster(dfsConf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(dfsConf).numDataNodes(2).build();
       cluster.waitActive();
 
       final FileSystem localfs = FileSystem.get(LOCAL_FS, dfsConf);

+ 1 - 1
src/test/aop/org/apache/hadoop/fs/TestFiListPath.java

@@ -56,7 +56,7 @@ public class TestFiListPath {
   public static void setup() throws IOException {
     Configuration conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, LIST_LIMIT);
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitClusterUp();
     fs = cluster.getFileSystem();
   }

+ 1 - 1
src/test/aop/org/apache/hadoop/hdfs/TestFiHftp.java

@@ -83,7 +83,7 @@ public class TestFiHftp {
     final Configuration conf = new Configuration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
       cluster.waitActive();
 
       //test with a file

+ 1 - 1
src/test/aop/org/apache/hadoop/hdfs/TestFiPipelines.java

@@ -57,7 +57,7 @@ public class TestFiPipelines {
 
   @Before
   public void startUpCluster() throws IOException {
-    cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 

+ 1 - 1
src/test/aop/org/apache/hadoop/hdfs/server/datanode/TestFiDataTransferProtocol2.java

@@ -88,7 +88,7 @@ public class TestFiDataTransferProtocol2 {
     FiTestUtil.LOG.info("size=" + size + ", nPackets=" + nPackets
         + ", lastPacketSize=" + lastPacketSize);
 
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
     final FileSystem dfs = cluster.getFileSystem();
     try {
       final Path p = new Path("/" + methodName + "/foo");

+ 4 - 1
src/test/hdfs/org/apache/hadoop/cli/TestHDFSCLI.java

@@ -52,7 +52,10 @@ public class TestHDFSCLI extends CLITestHelper {
                         "/rack2", "/rack3", "/rack4", "/rack4" };
     String [] hosts = {"host1", "host2", "host3", "host4",
                        "host5", "host6", "host7", "host8" };
-    dfsCluster = new MiniDFSCluster(conf, 8, true, racks, hosts);
+    dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
+                                                 .racks(racks)
+                                                 .hosts(hosts)
+                                                 .build();
     
     namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
     

+ 1 - 1
src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsCreateMkdir.java

@@ -42,7 +42,7 @@ public class TestFcHdfsCreateMkdir extends
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster(conf, 2, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fc = FileContext.getFileContext(cluster.getURI(), conf);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));

+ 1 - 1
src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsPermission.java

@@ -42,7 +42,7 @@ public class TestFcHdfsPermission extends FileContextPermissionBase {
   public static void clusterSetupAtBegining()
                                     throws IOException, LoginException, URISyntaxException  {
     Configuration conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster(conf, 2, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fc = FileContext.getFileContext(cluster.getURI(), conf);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
         UserGroupInformation.getCurrentUser().getShortUserName()));

+ 1 - 1
src/test/hdfs/org/apache/hadoop/fs/TestFcHdfsSymlink.java

@@ -71,7 +71,7 @@ public class TestFcHdfsSymlink extends FileContextSymlinkBaseTest {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
     conf.set(FsPermission.UMASK_LABEL, "000");
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     fc = FileContext.getFileContext(cluster.getURI());
   }
   

+ 1 - 1
src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java

@@ -50,7 +50,7 @@ public class TestGlobPaths extends TestCase {
   protected void setUp() throws Exception {
     try {
       Configuration conf = new HdfsConfiguration();
-      dfsCluster = new MiniDFSCluster(conf, 1, true, null);
+      dfsCluster = new MiniDFSCluster.Builder(conf).build();
       fs = FileSystem.get(conf);
     } catch (IOException e) {
       e.printStackTrace();

+ 3 - 2
src/test/hdfs/org/apache/hadoop/fs/TestHDFSFileContextMainOperations.java

@@ -48,7 +48,7 @@ public class TestHDFSFileContextMainOperations extends
   @BeforeClass
   public static void clusterSetupAtBegining() throws IOException,
       LoginException, URISyntaxException {
-    cluster = new MiniDFSCluster(CONF, 2, true, null);
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(), CONF);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 
@@ -61,7 +61,8 @@ public class TestHDFSFileContextMainOperations extends
       cluster.shutdown();
       cluster = null;
     }
-    cluster = new MiniDFSCluster(CONF, 1, false, null);
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(1)
+                                              .format(false).build();
     cluster.waitClusterUp();
     fc = FileContext.getFileContext(cluster.getURI(), CONF);
     defaultWorkingDirectory = fc.makeQualified( new Path("/user/" + 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/fs/TestUrlStreamHandler.java

@@ -50,7 +50,7 @@ public class TestUrlStreamHandler extends TestCase {
   public void testDfsUrls() throws IOException {
 
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
 
     // Setup our own factory

+ 1 - 1
src/test/hdfs/org/apache/hadoop/fs/loadGenerator/TestLoadGenerator.java

@@ -150,7 +150,7 @@ public class TestLoadGenerator extends Configured implements Tool {
     writer.write(FILE_STRUCTURE_SECOND_LINE+"\n");
     writer.close();
     
-    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(3).build();
     cluster.waitActive();
     
     try {

+ 4 - 4
src/test/hdfs/org/apache/hadoop/fs/permission/TestStickyBit.java

@@ -164,7 +164,7 @@ public class TestStickyBit extends TestCase {
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
       conf.setBoolean("dfs.support.append", true);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
 
       FileSystem hdfs = cluster.getFileSystem();
 
@@ -203,7 +203,7 @@ public class TestStickyBit extends TestCase {
       // Set up cluster for testing
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       FileSystem hdfs = cluster.getFileSystem();
 
       assertTrue(hdfs instanceof DistributedFileSystem);
@@ -249,7 +249,7 @@ public class TestStickyBit extends TestCase {
     try {
       Configuration conf = new HdfsConfiguration();
       conf.setBoolean(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, true);
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       FileSystem hdfs = cluster.getFileSystem();
 
       assertTrue(hdfs instanceof DistributedFileSystem);
@@ -269,7 +269,7 @@ public class TestStickyBit extends TestCase {
       cluster.shutdown();
 
       // Start file system up again
-      cluster = new MiniDFSCluster(conf, 4, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
       hdfs = cluster.getFileSystem();
 
       assertTrue(hdfs.exists(sbSet));

+ 2 - 1
src/test/hdfs/org/apache/hadoop/hdfs/BenchmarkThroughput.java

@@ -206,7 +206,8 @@ public class BenchmarkThroughput extends Configured implements Tool {
     }
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 1, true, new String[]{"/foo"});
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .racks(new String[]{"/foo"}).build();
       cluster.waitActive();
       FileSystem dfs = cluster.getFileSystem();
       for(int i=0; i < reps; ++i) {

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/FileAppendTest4.java

@@ -61,7 +61,7 @@ public class FileAppendTest4 {
   public static void startUp () throws IOException {
     conf = new HdfsConfiguration();
     init(conf);
-    cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     fs = (DistributedFileSystem)cluster.getFileSystem();
   }
 

+ 130 - 0
src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -72,6 +72,121 @@ import org.apache.hadoop.util.ToolRunner;
  */
 public class MiniDFSCluster {
 
+  /**
+   * Class to construct instances of MiniDFSClusters with specific options.
+   */
+  public static class Builder {
+    private int nameNodePort = 0;
+    private final Configuration conf;
+    private int numDataNodes = 1;
+    private boolean format = true;
+    private boolean manageNameDfsDirs = true;
+    private boolean manageDataDfsDirs = true;
+    private StartupOption option = null;
+    private String[] racks = null; 
+    private String [] hosts = null;
+    private long [] simulatedCapacities = null;
+    
+    public Builder(Configuration conf) {
+      this.conf = conf;
+    }
+    
+    /**
+     * Default: 0
+     */
+    public Builder nameNodePort(int val) {
+      this.nameNodePort = val;
+      return this;
+    }
+
+    /**
+     * Default: 1
+     */
+    public Builder numDataNodes(int val) {
+      this.numDataNodes = val;
+      return this;
+    }
+
+    /**
+     * Default: true
+     */
+    public Builder format(boolean val) {
+      this.format = val;
+      return this;
+    }
+
+    /**
+     * Default: true
+     */
+    public Builder manageNameDfsDirs(boolean val) {
+      this.manageNameDfsDirs = val;
+      return this;
+    }
+
+    /**
+     * Default: true
+     */
+    public Builder manageDataDfsDirs(boolean val) {
+      this.manageDataDfsDirs = val;
+      return this;
+    }
+
+    /**
+     * Default: null
+     */
+    public Builder startupOption(StartupOption val) {
+      this.option = val;
+      return this;
+    }
+
+    /**
+     * Default: null
+     */
+    public Builder racks(String[] val) {
+      this.racks = val;
+      return this;
+    }
+
+    /**
+     * Default: null
+     */
+    public Builder hosts(String[] val) {
+      this.hosts = val;
+      return this;
+    }
+
+    /**
+     * Default: null
+     */
+    public Builder simulatedCapacities(long[] val) {
+      this.simulatedCapacities = val;
+      return this;
+    }
+    
+    /**
+     * Construct the actual MiniDFSCluster
+     */
+    public MiniDFSCluster build() throws IOException {
+      return new MiniDFSCluster(this);
+    }
+  }
+  
+  /**
+   * Used by builder to create and return an instance of MiniDFSCluster
+   */
+  private MiniDFSCluster(Builder builder) throws IOException {
+    initMiniDFSCluster(builder.nameNodePort,
+                       builder.conf,
+                       builder.numDataNodes,
+                       builder.format,
+                       builder.manageNameDfsDirs,
+                       builder.manageDataDfsDirs,
+                       builder.option,
+                       builder.racks,
+                       builder.hosts,
+                       builder.simulatedCapacities);
+  }
+  
   public class DataNodeProperties {
     DataNode datanode;
     Configuration conf;
@@ -117,6 +232,7 @@ public class MiniDFSCluster {
    * @param nameNodeOperation the operation with which to start the servers.  If null
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    */
+  @Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
   public MiniDFSCluster(Configuration conf,
                         int numDataNodes,
                         StartupOption nameNodeOperation) throws IOException {
@@ -137,6 +253,7 @@ public class MiniDFSCluster {
    * @param format if true, format the NameNode and DataNodes before starting up
    * @param racks array of strings indicating the rack that each DataNode is on
    */
+  @Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
   public MiniDFSCluster(Configuration conf,
                         int numDataNodes,
                         boolean format,
@@ -158,6 +275,7 @@ public class MiniDFSCluster {
    * @param racks array of strings indicating the rack that each DataNode is on
    * @param hosts array of strings indicating the hostname for each DataNode
    */
+  @Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
   public MiniDFSCluster(Configuration conf,
                         int numDataNodes,
                         boolean format,
@@ -183,6 +301,7 @@ public class MiniDFSCluster {
    *          or StartupOption.FORMAT, then StartupOption.REGULAR will be used.
    * @param racks array of strings indicating the rack that each DataNode is on
    */
+  @Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
   public MiniDFSCluster(int nameNodePort, 
                         Configuration conf,
                         int numDataNodes,
@@ -213,6 +332,7 @@ public class MiniDFSCluster {
    * @param racks array of strings indicating the rack that each DataNode is on
    * @param simulatedCapacities array of capacities of the simulated data nodes
    */
+  @Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
   public MiniDFSCluster(int nameNodePort, 
                         Configuration conf,
                         int numDataNodes,
@@ -247,6 +367,7 @@ public class MiniDFSCluster {
    * @param hosts array of strings indicating the hostnames of each DataNode
    * @param simulatedCapacities array of capacities of the simulated data nodes
    */
+  @Deprecated // in 22 to be removed in 24. Use MiniDFSCluster.Builder instead
   public MiniDFSCluster(int nameNodePort, 
                         Configuration conf,
                         int numDataNodes,
@@ -256,6 +377,15 @@ public class MiniDFSCluster {
                         StartupOption operation,
                         String[] racks, String hosts[],
                         long[] simulatedCapacities) throws IOException {
+    initMiniDFSCluster(nameNodePort, conf, numDataNodes, format,
+        manageNameDfsDirs, manageDataDfsDirs, operation, racks, hosts,
+        simulatedCapacities);
+  }
+
+  private void initMiniDFSCluster(int nameNodePort, Configuration conf,
+      int numDataNodes, boolean format, boolean manageNameDfsDirs,
+      boolean manageDataDfsDirs, StartupOption operation, String[] racks,
+      String[] hosts, long[] simulatedCapacities) throws IOException {
     this.conf = conf;
     base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
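
Taken together, the Builder and the private constructor above replace all six deprecated public constructors, which now funnel into the shared initMiniDFSCluster method. A sketch of a fuller invocation exercising the remaining options (the rack, host, and capacity values here are illustrative, not taken from the patch):

    Configuration conf = new HdfsConfiguration();
    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
        .nameNodePort(0)          // default: 0 lets the NameNode pick a free port
        .numDataNodes(3)
        .racks(new String[] {"/rack1", "/rack1", "/rack2"})
        .hosts(new String[] {"host1", "host2", "host3"})
        .simulatedCapacities(new long[] {1L << 30, 1L << 30, 1L << 30})
        .build();
    try {
      cluster.waitActive();
      FileSystem fs = cluster.getFileSystem();
      // ... exercise the cluster ...
    } finally {
      cluster.shutdown();
    }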

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java

@@ -36,7 +36,7 @@ public class TestAbandonBlock extends junit.framework.TestCase {
       = "/" + TestAbandonBlock.class.getSimpleName() + "_"; 
 
   public void testAbandonBlock() throws IOException {
-    MiniDFSCluster cluster = new MiniDFSCluster(CONF, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
 
     String src = FILE_NAME_PREFIX + "foo";

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestBlockMissingException.java

@@ -52,7 +52,7 @@ public class TestBlockMissingException extends TestCase {
     int numBlocks = 4;
     conf = new HdfsConfiguration();
     try {
-      dfs = new MiniDFSCluster(conf, NUM_DATANODES, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(NUM_DATANODES).build();
       dfs.waitActive();
       fileSys = (DistributedFileSystem)dfs.getFileSystem();
       Path file1 = new Path("/user/dhruba/raidtest/file1");

+ 6 - 7
src/test/hdfs/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -20,13 +20,12 @@ package org.apache.hadoop.hdfs;
 import java.io.IOException;
 import java.util.ArrayList;
 
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
+import junit.framework.TestCase;
+
 import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
-
-import junit.framework.TestCase;
+import org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor;
 
 /**
  * This class tests DatanodeDescriptor.getBlocksScheduled() at the
@@ -36,9 +35,9 @@ import junit.framework.TestCase;
 public class TestBlocksScheduledCounter extends TestCase {
 
   public void testBlocksScheduledCounter() throws IOException {
-    
-    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 1, 
-                                                true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration())
+                                               .build();
+
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestClientBlockVerification.java

@@ -56,7 +56,7 @@ public class TestClientBlockVerification {
     conf = new HdfsConfiguration();
     int numDataNodes = 1;
     conf.setInt("dfs.replication", numDataNodes);
-    cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestClientProtocolForPipelineRecovery.java

@@ -41,7 +41,7 @@ public class TestClientProtocolForPipelineRecovery {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean("dfs.support.append", true);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
       cluster.waitActive();
       FileSystem fileSys = cluster.getFileSystem();

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestCrcCorruption.java

@@ -72,7 +72,7 @@ public class TestCrcCorruption {
     Random random = new Random();
 
     try {
-      cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat", replFactor);
@@ -248,7 +248,7 @@ public class TestCrcCorruption {
 
     Configuration conf = new Configuration();
     conf.setInt("dfs.replication", numDataNodes);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
 
     try {
       cluster.waitActive();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientExcludedNodes.java

@@ -37,7 +37,7 @@ public class TestDFSClientExcludedNodes {
   @Test
   public void testExcludedNodes() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fs = cluster.getFileSystem();
     Path filePath = new Path("/testExcludedNodes");
 

+ 4 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSClientRetries.java

@@ -92,7 +92,7 @@ public class TestDFSClientRetries extends TestCase {
     final int bufferSize = 4096;
     conf.setInt("io.file.buffer.size", bufferSize);
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     
     try {
       cluster.waitActive();
@@ -185,7 +185,7 @@ public class TestDFSClientRetries extends TestCase {
     Configuration conf = new Configuration();
     // Set short retry timeout so this test runs faster
     conf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 
     try {
       cluster.waitActive();
@@ -384,7 +384,7 @@ public class TestDFSClientRetries extends TestCase {
     conf.setInt("dfs.client.max.block.acquire.failures", retries);
     conf.setInt("dfs.client.retry.window.base", timeWin);
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, replicationFactor, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(replicationFactor).build();
     cluster.waitActive();
     
     FileSystem fs = cluster.getFileSystem();
@@ -538,7 +538,7 @@ public class TestDFSClientRetries extends TestCase {
     final Path p = new Path(f);
 
     final Configuration conf = new Configuration();
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
 

+ 6 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSFinalize.java

@@ -99,7 +99,12 @@ public class TestDFSFinalize extends TestCase {
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf, 1, StartupOption.REGULAR);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .format(false)
+                                  .manageDataDfsDirs(false)
+                                  .manageNameDfsDirs(false)
+                                  .startupOption(StartupOption.REGULAR)
+                                  .build();
       cluster.finalizeCluster(conf);
       checkResult(nameNodeDirs, dataNodeDirs);
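
The extra Builder calls here are not a behavior change: as the conversions throughout this patch show, the deprecated (conf, numDataNodes, StartupOption) constructor implied format = false and unmanaged name and data directories, so the Builder form has to spell out format(false), manageNameDfsDirs(false), and manageDataDfsDirs(false) to preserve the old semantics.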
 

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSMkdirs.java

@@ -44,7 +44,7 @@ public class TestDFSMkdirs extends TestCase {
    */
   public void testDFSMkdirs() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       // First create a new directory with mkdirs
@@ -81,7 +81,7 @@ public class TestDFSMkdirs extends TestCase {
    */
   public void testMkdir() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     DistributedFileSystem dfs = (DistributedFileSystem) cluster.getFileSystem();
     try {
       // Create a dir in root dir, should succeed

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSPermission.java

@@ -112,7 +112,7 @@ public class TestDFSPermission extends TestCase {
    * setPermission works correctly
    */
   public void testPermissionSetting() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
       fs = FileSystem.get(conf);
@@ -228,7 +228,7 @@ public class TestDFSPermission extends TestCase {
 
   /* check if the ownership of a file/directory is set correctly */
   public void testOwnership() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
       testOwnership(OpType.CREATE); // test file creation
@@ -323,7 +323,7 @@ public class TestDFSPermission extends TestCase {
   /* Check if namenode performs permission checking correctly for
    * superuser, file owner, group owner, and other users */
   public void testPermissionChecking() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       cluster.waitActive();
       fs = FileSystem.get(conf);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRemove.java

@@ -56,7 +56,7 @@ public class TestDFSRemove extends junit.framework.TestCase {
   
   public void testRemove() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     try {
       FileSystem fs = cluster.getFileSystem();
       assertTrue(fs.mkdirs(dir));

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRename.java

@@ -46,7 +46,7 @@ public class TestDFSRename extends junit.framework.TestCase {
   
   public void testRename() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     try {
       FileSystem fs = cluster.getFileSystem();
       assertTrue(fs.mkdirs(dir));

+ 36 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSRollback.java

@@ -91,7 +91,12 @@ public class TestDFSRollback extends TestCase {
    */
   void startNameNodeShouldFail(StartupOption operation) {
     try {
-      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .startupOption(operation)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .build(); // should fail
       throw new AssertionError("NameNode should have failed to start");
     } catch (Exception expected) {
       // expected
@@ -130,7 +135,12 @@ public class TestDFSRollback extends TestCase {
       log("Normal NameNode rollback", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .startupOption(StartupOption.ROLLBACK)
+                                                .build();
       checkResult(NAME_NODE, nameNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
@@ -138,7 +148,12 @@ public class TestDFSRollback extends TestCase {
       log("Normal DataNode rollback", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .startupOption(StartupOption.ROLLBACK)
+                                                .build();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
@@ -154,7 +169,12 @@ public class TestDFSRollback extends TestCase {
       
       log("DataNode rollback without existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .startupOption(StartupOption.UPGRADE)
+                                                .build();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       cluster.startDataNodes(conf, 1, false, StartupOption.ROLLBACK, null);
       cluster.shutdown();
@@ -164,7 +184,12 @@ public class TestDFSRollback extends TestCase {
       log("DataNode rollback with future stored layout version in previous", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .startupOption(StartupOption.ROLLBACK)
+                                                .build();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
@@ -181,7 +206,12 @@ public class TestDFSRollback extends TestCase {
       log("DataNode rollback with newer fsscTime in previous", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "previous");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.ROLLBACK);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .startupOption(StartupOption.ROLLBACK)
+                                                .build();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,

+ 15 - 15
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShell.java

@@ -94,7 +94,7 @@ public class TestDFSShell extends TestCase {
 
   public void testZeroSizeFile() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
@@ -136,7 +136,7 @@ public class TestDFSShell extends TestCase {
   
   public void testRecrusiveRm() throws IOException {
 	  Configuration conf = new HdfsConfiguration();
-	  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+	  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
 	  FileSystem fs = cluster.getFileSystem();
 	  assertTrue("Not a HDFS: " + fs.getUri(), 
 			  fs instanceof DistributedFileSystem);
@@ -161,7 +161,7 @@ public class TestDFSShell extends TestCase {
     
   public void testDu() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                 fs instanceof DistributedFileSystem);
@@ -210,7 +210,7 @@ public class TestDFSShell extends TestCase {
   }
   public void testPut() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
@@ -309,7 +309,7 @@ public class TestDFSShell extends TestCase {
     MiniDFSCluster cluster = null;
     PrintStream bak = null;
     try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       FileSystem srcFs = cluster.getFileSystem();
       Path root = new Path("/nonexistentfile");
       bak = System.err;
@@ -453,11 +453,11 @@ public class TestDFSShell extends TestCase {
     MiniDFSCluster dstCluster = null;
     String bak = System.getProperty("test.build.data");
     try{
-      srcCluster = new MiniDFSCluster(srcConf, 2, true, null);
+      srcCluster = new MiniDFSCluster.Builder(srcConf).numDataNodes(2).build();
       File nameDir = new File(new File(bak), "dfs_tmp_uri/");
       nameDir.mkdirs();
       System.setProperty("test.build.data", nameDir.toString());
-      dstCluster = new MiniDFSCluster(dstConf, 2, true, null);
+      dstCluster = new MiniDFSCluster.Builder(dstConf).numDataNodes(2).build();
       FileSystem srcFs = srcCluster.getFileSystem();
       FileSystem dstFs = dstCluster.getFileSystem();
       FsShell shell = new FsShell();
@@ -542,7 +542,7 @@ public class TestDFSShell extends TestCase {
     Configuration conf = new HdfsConfiguration();
     MiniDFSCluster cluster = null;
     try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final FileSystem dfs = cluster.getFileSystem();
       textTest(new Path("/texttest").makeQualified(dfs.getUri(),
             dfs.getWorkingDirectory()), conf);
@@ -596,7 +596,7 @@ public class TestDFSShell extends TestCase {
 
   public void testCopyToLocal() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                fs instanceof DistributedFileSystem);
@@ -693,7 +693,7 @@ public class TestDFSShell extends TestCase {
 
   public void testCount() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
     FsShell shell = new FsShell();
     shell.setConf(conf);
@@ -859,7 +859,7 @@ public class TestDFSShell extends TestCase {
     conf.set(DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY, "true");
     
     //test chmod on DFS
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fs = cluster.getFileSystem();
     testChmod(conf, fs, "/tmp/chmodTest");
     
@@ -917,7 +917,7 @@ public class TestDFSShell extends TestCase {
     Configuration conf = new HdfsConfiguration();
     /* This tests some properties of ChecksumFileSystem as well.
      * Make sure that we create ChecksumDFS */
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
@@ -1140,7 +1140,7 @@ public class TestDFSShell extends TestCase {
     PrintStream bak = null;
     try {
       final Configuration conf = new HdfsConfiguration();
-      dfs = new MiniDFSCluster(conf, 2, true, null);
+      dfs = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       FileSystem fs = dfs.getFileSystem();
       Path p = new Path("/foo");
       fs.mkdirs(p);
@@ -1179,7 +1179,7 @@ public class TestDFSShell extends TestCase {
   public void testGet() throws IOException {
     DFSTestUtil.setLogLevel2All(FSInputChecker.LOG);
     final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
     try {
@@ -1237,7 +1237,7 @@ public class TestDFSShell extends TestCase {
 
   public void testLsr() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
 
     try {

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSShellGenericOptions.java

@@ -38,7 +38,7 @@ public class TestDFSShellGenericOptions extends TestCase {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       namenode = FileSystem.getDefaultUri(conf).toString();
       String [] args = new String[4];
       args[2] = "-mkdir";

+ 6 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStartupVersions.java

@@ -175,7 +175,12 @@ public class TestDFSStartupVersions extends TestCase {
     StorageInfo[] versions = initializeVersions();
     UpgradeUtilities.createStorageDirs(
                                        NAME_NODE, conf.getStrings(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY), "current");
-    cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                              .format(false)
+                                              .manageDataDfsDirs(false)
+                                              .manageNameDfsDirs(false)
+                                              .startupOption(StartupOption.REGULAR)
+                                              .build();
     StorageInfo nameNodeVersion = new StorageInfo(
                                                   UpgradeUtilities.getCurrentLayoutVersion(),
                                                   UpgradeUtilities.getCurrentNamespaceID(cluster),

+ 12 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSStorageStateRecovery.java

@@ -184,6 +184,15 @@ public class TestDFSStorageStateRecovery extends TestCase {
     }
   }
  
+  private MiniDFSCluster createCluster(Configuration c) throws IOException {
+    return new MiniDFSCluster.Builder(c)
+                             .numDataNodes(0)
+                             .startupOption(StartupOption.REGULAR)
+                             .format(false)
+                             .manageDataDfsDirs(false)
+                             .manageNameDfsDirs(false)
+                             .build();
+  }
   /**
    * This test iterates over the testCases table and attempts
    * to startup the NameNode normally.
@@ -204,12 +213,12 @@ public class TestDFSStorageStateRecovery extends TestCase {
         log("NAME_NODE recovery", numDirs, i, testCase);
         baseDirs = createStorageState(NAME_NODE, testCase);
         if (shouldRecover) {
-          cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+          cluster = createCluster(conf);
           checkResult(NAME_NODE, baseDirs, curAfterRecover, prevAfterRecover);
           cluster.shutdown();
         } else {
           try {
-            cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+            cluster = createCluster(conf);
             throw new AssertionError("NameNode should have failed to start");
           } catch (IOException expected) {
             // the exception is expected
@@ -247,7 +256,7 @@ public class TestDFSStorageStateRecovery extends TestCase {
         log("DATA_NODE recovery", numDirs, i, testCase);
         createStorageState(NAME_NODE,
                            new boolean[] {true, true, false, false, false});
-        cluster = new MiniDFSCluster(conf, 0, StartupOption.REGULAR);
+        cluster = createCluster(conf);
         baseDirs = createStorageState(DATA_NODE, testCase);
         if (!testCase[0] && !testCase[1] && !testCase[2] && !testCase[3]) {
           // DataNode will create and format current if no directories exist

+ 24 - 6
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgrade.java

@@ -98,7 +98,12 @@ public class TestDFSUpgrade extends TestCase {
    */
   void startNameNodeShouldFail(StartupOption operation) {
     try {
-      cluster = new MiniDFSCluster(conf, 0, operation); // should fail
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .startupOption(operation)
+                                                .format(false)
+                                                .manageDataDfsDirs(false)
+                                                .manageNameDfsDirs(false)
+                                                .build(); // should fail
       throw new AssertionError("NameNode should have failed to start");
     } catch (Exception expected) {
       // expected
@@ -119,6 +124,19 @@ public class TestDFSUpgrade extends TestCase {
     }
   }
  
+  /**
+   * Create an instance of a newly configured cluster for testing that does
+   * not manage its own directories or files
+   */
+  private MiniDFSCluster createCluster() throws IOException {
+    return new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                           .format(false)
+                                           .manageDataDfsDirs(false)
+                                           .manageNameDfsDirs(false)
+                                           .startupOption(StartupOption.UPGRADE)
+                                           .build();
+  }
+  
   /**
    * This test attempts to upgrade the NameNode and DataNode under
    * a number of valid and invalid conditions.
@@ -136,14 +154,14 @@ public class TestDFSUpgrade extends TestCase {
       
       log("Normal NameNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      cluster = createCluster();
       checkResult(NAME_NODE, nameNodeDirs);
       cluster.shutdown();
       UpgradeUtilities.createEmptyDirs(nameNodeDirs);
       
       log("Normal DataNode upgrade", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      cluster = createCluster();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
       checkResult(DATA_NODE, dataNodeDirs);
@@ -159,7 +177,7 @@ public class TestDFSUpgrade extends TestCase {
       
       log("DataNode upgrade with existing previous dir", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      cluster = createCluster();
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "previous");
       cluster.startDataNodes(conf, 1, false, StartupOption.REGULAR, null);
@@ -170,7 +188,7 @@ public class TestDFSUpgrade extends TestCase {
 
       log("DataNode upgrade with future stored layout version in current", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      cluster = createCluster();
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(Integer.MIN_VALUE,
@@ -185,7 +203,7 @@ public class TestDFSUpgrade extends TestCase {
       
       log("DataNode upgrade with newer fsscTime in current", numDirs);
       UpgradeUtilities.createStorageDirs(NAME_NODE, nameNodeDirs, "current");
-      cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
+      cluster = createCluster();
       baseDirs = UpgradeUtilities.createStorageDirs(DATA_NODE, dataNodeDirs, "current");
       UpgradeUtilities.createVersionFile(DATA_NODE, baseDirs,
                                          new StorageInfo(UpgradeUtilities.getCurrentLayoutVersion(),

+ 5 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDFSUpgradeFromImage.java

@@ -182,8 +182,11 @@ public class TestDFSUpgradeFromImage extends TestCase {
         System.setProperty("test.build.data", "build/test/data");
       }
       conf.setInt("dfs.datanode.scan.period.hours", -1); // block scanning off
-      cluster = new MiniDFSCluster(0, conf, numDataNodes, false, true,
-                                   StartupOption.UPGRADE, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(numDataNodes)
+                                  .format(false)
+                                  .startupOption(StartupOption.UPGRADE)
+                                  .build();
       cluster.waitActive();
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();
       DFSClient dfsClient = dfs.dfs;
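
The conversions in this file show the general shape of the change: every positional argument of the old constructors becomes a named, chainable setter, and only the non-default settings appear at the call site. A minimal, self-contained sketch of that fluent-builder idiom follows; the class and member names here are hypothetical illustrations, not the actual MiniDFSCluster internals.

    /** Hypothetical sketch of a fluent builder in the style used above. */
    public class ClusterSpec {
      private final int numDataNodes;
      private final boolean format;

      private ClusterSpec(Builder b) {
        this.numDataNodes = b.numDataNodes;
        this.format = b.format;
      }

      public int numDataNodes() { return numDataNodes; }
      public boolean format() { return format; }

      public static class Builder {
        // Defaults cover the most common call site, so a bare build() suffices.
        private int numDataNodes = 1;
        private boolean format = true;

        public Builder numDataNodes(int n) { this.numDataNodes = n; return this; }
        public Builder format(boolean f) { this.format = f; return this; }
        public ClusterSpec build() { return new ClusterSpec(this); }
      }
    }

    // Call sites then read like the converted tests:
    //   ClusterSpec spec = new ClusterSpec.Builder().numDataNodes(4).format(false).build();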

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDataTransferProtocol.java

@@ -192,7 +192,7 @@ public class TestDataTransferProtocol extends TestCase {
     int numDataNodes = 1;
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean("dfs.support.append", true);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
       cluster.waitActive();
       datanode = cluster.getDataNodes().get(0).dnRegistration;
@@ -323,7 +323,7 @@ public class TestDataTransferProtocol extends TestCase {
     
     Configuration conf = new HdfsConfiguration();
     conf.setInt("dfs.replication", numDataNodes); 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     try {
     cluster.waitActive();
     DFSClient dfsClient = new DFSClient(

+ 11 - 7
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -103,7 +103,7 @@ public class TestDatanodeBlockScanner extends TestCase {
     long startTime = System.currentTimeMillis();
     
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     
     FileSystem fs = cluster.getFileSystem();
@@ -115,7 +115,9 @@ public class TestDatanodeBlockScanner extends TestCase {
      */
     DFSTestUtil.createFile(fs, file1, 10, (short)1, 0);
     cluster.shutdown();
-    cluster = new MiniDFSCluster(conf, 1, false, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(1)
+                                .format(false).build();
     cluster.waitActive();
     
     DFSClient dfsClient =  new DFSClient(new InetSocketAddress("localhost", 
@@ -174,7 +176,7 @@ public class TestDatanodeBlockScanner extends TestCase {
     int blockCount = 0;
     int rand = random.nextInt(3);
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/tmp/testBlockVerification/file1");
@@ -285,7 +287,7 @@ public class TestDatanodeBlockScanner extends TestCase {
     LocatedBlocks blocks = null;
     int replicaCount = 0;
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDataNodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     Path file1 = new Path("/tmp/testBlockCorruptRecovery/file");
@@ -386,7 +388,7 @@ public class TestDatanodeBlockScanner extends TestCase {
     final Configuration conf = new HdfsConfiguration();
     final short REPLICATION_FACTOR = (short)2;
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION_FACTOR).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     try {
@@ -402,8 +404,10 @@ public class TestDatanodeBlockScanner extends TestCase {
       cluster.shutdown();
 
       // restart the cluster
-      cluster = new MiniDFSCluster(
-          0, conf, REPLICATION_FACTOR, false, true, null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(REPLICATION_FACTOR)
+                                  .format(false)
+                                  .build();
       cluster.startDataNodes(conf, 1, true, null, null);
       cluster.waitActive();  // now we have 3 datanodes
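
The first hunk above replaces new MiniDFSCluster(conf, 1, true, null) with a bare new MiniDFSCluster.Builder(conf).build(), so this patch treats one datanode, a freshly formatted cluster, and no rack assignments as the builder's defaults. A minimal sketch under that assumption:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class BuilderDefaultsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Equivalent, per the conversions in this patch, to:
        //   new MiniDFSCluster(conf, 1, true, null)
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive(); // block until the single datanode has registered
        } finally {
          cluster.shutdown();
        }
      }
    }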
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeConfig.java

@@ -47,7 +47,7 @@ public class TestDatanodeConfig {
   public static void setUp() throws Exception {
     clearBaseDir();
     Configuration conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster(conf, 0, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
     cluster.waitActive();
   }
 

+ 3 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeDeath.java

@@ -288,7 +288,8 @@ public class TestDatanodeDeath extends TestCase {
     conf.setInt("dfs.heartbeat.interval", 2);
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 2);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_TIMEOUT_KEY, 5000);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     Modify modThread = null;
@@ -346,7 +347,7 @@ public class TestDatanodeDeath extends TestCase {
     int myMaxNodes = 5;
     System.out.println("SimpleTest starting with DataNode to Kill " + 
                        datanodeToKill);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, myMaxNodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(myMaxNodes).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     short repl = 3;

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeRegistration.java

@@ -43,7 +43,7 @@ public class TestDatanodeRegistration extends TestCase {
     MiniDFSCluster cluster = null;
     FileSystem fs = null;
     try {
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       fs = cluster.getFileSystem();
 
       InetSocketAddress addr = new InetSocketAddress(

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeReport.java

@@ -43,7 +43,7 @@ public class TestDatanodeReport extends TestCase {
         DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 500); // 0.5s
     conf.setLong("dfs.heartbeat.interval", 1L);
     MiniDFSCluster cluster = 
-      new MiniDFSCluster(conf, NUM_OF_DATANODES, true, null);
+      new MiniDFSCluster.Builder(conf).numDataNodes(NUM_OF_DATANODES).build();
     try {
       //wait until the cluster is up
       cluster.waitActive();

+ 2 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestDecommission.java

@@ -256,7 +256,8 @@ public class TestDecommission extends TestCase {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, 4);
     writeConfigFile(localFileSys, excludeFile, null);
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes).build();
     cluster.waitActive();
     InetSocketAddress addr = new InetSocketAddress("localhost", 
                                                    cluster.getNameNodePort());

+ 5 - 5
src/test/hdfs/org/apache/hadoop/hdfs/TestDistributedFileSystem.java

@@ -58,7 +58,7 @@ public class TestDistributedFileSystem {
   @Test
   public void testFileSystemCloseAll() throws Exception {
     Configuration conf = getTestConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 0, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).build();
     URI address = FileSystem.getDefaultUri(conf);
 
     try {
@@ -82,7 +82,7 @@ public class TestDistributedFileSystem {
   @Test
   public void testDFSClose() throws Exception {
     Configuration conf = getTestConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     FileSystem fileSys = cluster.getFileSystem();
 
     try {
@@ -103,7 +103,7 @@ public class TestDistributedFileSystem {
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       final Path filepath = new Path("/test/LeaseChecker/foo");
       final long millis = System.currentTimeMillis();
 
@@ -186,7 +186,7 @@ public class TestDistributedFileSystem {
     int lsLimit = 2;
     final Configuration conf = getTestConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, lsLimit);
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       final FileSystem fs = cluster.getFileSystem();
       Path dir = new Path("/test");
@@ -290,7 +290,7 @@ public class TestDistributedFileSystem {
     final Configuration conf = getTestConfiguration();
     conf.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem hdfs = cluster.getFileSystem();
     final String hftpuri = "hftp://" + conf.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);
     System.out.println("hftpuri=" + hftpuri);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFSInputChecker.java

@@ -299,7 +299,7 @@ public class TestFSInputChecker extends TestCase {
     rand.nextBytes(expected);
 
     // test DFS
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       testChecker(fileSys, true);

+ 3 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFSOutputSummer.java

@@ -113,8 +113,9 @@ public class TestFSOutputSummer extends TestCase {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, BYTES_PER_CHECKSUM);
-    MiniDFSCluster cluster = new MiniDFSCluster(
-        conf, NUM_OF_DATANODES, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(NUM_OF_DATANODES)
+                                               .build();
     fileSys = cluster.getFileSystem();
     try {
       Path file = new Path("try.dat");

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -106,7 +106,7 @@ public class TestFileAppend extends TestCase {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     InetSocketAddress addr = new InetSocketAddress("localhost",
                                                    cluster.getNameNodePort());
@@ -177,7 +177,7 @@ public class TestFileAppend extends TestCase {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
 
@@ -232,7 +232,7 @@ public class TestFileAppend extends TestCase {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
 

+ 4 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend2.java

@@ -86,7 +86,7 @@ public class TestFileAppend2 extends TestCase {
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setBoolean("dfs.support.append", true);
     fileContents = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
       { // test appending to a file.
@@ -339,8 +339,9 @@ public class TestFileAppend2 extends TestCase {
     conf.setInt("dfs.datanode.handler.count", 50);
     conf.setBoolean("dfs.support.append", true);
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, 
-                                                true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -69,7 +69,7 @@ public class TestFileAppend3 extends junit.framework.TestCase {
         conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, 512);
         conf.setBoolean("dfs.support.append", true);
         buffersize = conf.getInt("io.file.buffer.size", 4096);
-        cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
         fs = (DistributedFileSystem)cluster.getFileSystem();
       }
     

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend4.java

@@ -149,7 +149,7 @@ public class TestFileAppend4 {
    */
   @Test(timeout=60000)
   public void testRecoverFinalizedBlock() throws Throwable {
-    cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  
     try {
       cluster.waitActive();
@@ -220,7 +220,7 @@ public class TestFileAppend4 {
    */
   @Test(timeout=60000)
   public void testCompleteOtherLeaseHoldersFile() throws Throwable {
-    cluster = new MiniDFSCluster(conf, 3, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
  
     try {
       cluster.waitActive();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileConcurrentReader.java

@@ -85,7 +85,7 @@ public class TestFileConcurrentReader extends junit.framework.TestCase {
     if (cluster != null) {
       cluster.shutdown();
     }
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitClusterUp();
     fileSystem = cluster.getFileSystem();
   }

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -58,7 +58,7 @@ public class TestFileCorruption extends TestCase {
     DFSTestUtil util = new DFSTestUtil("TestFileCorruption", 20, 3, 8*1024);
     try {
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, 3, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
       FileSystem fs = cluster.getFileSystem();
       util.createFiles(fs, "/srcdat");
       // Now deliberately remove the blocks
@@ -113,7 +113,7 @@ public class TestFileCorruption extends TestCase {
     MiniDFSCluster cluster = null;
     try {
       Configuration conf = new HdfsConfiguration();
-      cluster = new MiniDFSCluster(conf, 2, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       cluster.waitActive();
       
       FileSystem fs = cluster.getFileSystem();

+ 20 - 17
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -106,8 +106,9 @@ public class TestFileCreation extends junit.framework.TestCase {
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, FSConstants.DEFAULT_WRITE_PACKET_SIZE);
     conf.setInt("dfs.replication", FSConstants.DEFAULT_REPLICATION_FACTOR + 1);
     conf.setInt("io.file.buffer.size", FSConstants.DEFAULT_FILE_BUFFER_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf,
-        FSConstants.DEFAULT_REPLICATION_FACTOR + 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                     .numDataNodes(FSConstants.DEFAULT_REPLICATION_FACTOR + 1)
+                     .build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     try {
@@ -131,7 +132,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
 
@@ -210,7 +211,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     FileSystem localfs = FileSystem.getLocal(conf);
 
@@ -275,7 +276,7 @@ public class TestFileCreation extends junit.framework.TestCase {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     cluster.waitActive();
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -349,7 +350,7 @@ public class TestFileCreation extends junit.framework.TestCase {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
@@ -419,7 +420,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     }
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = null;
     try {
       cluster.waitActive();
@@ -489,8 +490,9 @@ public class TestFileCreation extends junit.framework.TestCase {
         Thread.sleep(2*MAX_IDLE_TIME);
       } catch (InterruptedException e) {
       }
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
@@ -500,8 +502,9 @@ public class TestFileCreation extends junit.framework.TestCase {
         Thread.sleep(5000);
       } catch (InterruptedException e) {
       }
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
 
@@ -554,7 +557,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     DistributedFileSystem dfs = (DistributedFileSystem) fs;
     DFSClient dfsclient = dfs.dfs;
@@ -589,7 +592,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     final Path path = new Path("/" + System.currentTimeMillis()
         + "-testFileCreationNonRecursive");
@@ -689,7 +692,7 @@ public class TestFileCreation extends junit.framework.TestCase {
    */
   public void testConcurrentFileCreation() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 
     try {
       FileSystem fs = cluster.getFileSystem();
@@ -731,7 +734,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     conf.setInt("dfs.heartbeat.interval", 1);
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
@@ -790,7 +793,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     Configuration conf = new HdfsConfiguration();
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
@@ -821,7 +824,7 @@ public class TestFileCreation extends junit.framework.TestCase {
     conf.setInt("ipc.ping.interval", 10000); // hdfs timeout is now 10 second
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     DistributedFileSystem dfs = null;
     try {
       cluster.waitActive();
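
Several hunks in this file restart the NameNode on the port it was originally given, so addresses captured before the shutdown stay valid afterwards. A condensed sketch of that idiom (the tests additionally sleep between shutdown and restart so that leases reach the fsimage):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class RestartOnSamePortExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
        try {
          cluster.waitActive();
          final int nnport = cluster.getNameNodePort(); // remember the bound port
          cluster.shutdown();
          // Come back up on the same port, reusing the existing image.
          cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
                                                    .format(false)
                                                    .build();
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }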

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationClient.java

@@ -50,7 +50,7 @@ public class TestFileCreationClient extends junit.framework.TestCase {
     Configuration conf = new HdfsConfiguration();
     conf.setInt("dfs.datanode.handler.count", 1);
     conf.setInt("dfs.replication", REPLICATION);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, REPLICATION, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPLICATION).build();
 
     try {
       final FileSystem fs = cluster.getFileSystem();

+ 7 - 5
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationDelete.java

@@ -45,7 +45,7 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
     conf.setBoolean("dfs.support.append", true);
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = null;
     try {
       cluster.waitActive();
@@ -76,16 +76,18 @@ public class TestFileCreationDelete extends junit.framework.TestCase {
       // This ensures that leases are persisted in fsimage.
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
       // persistent leases from fsimage.
       cluster.shutdown();
       try {Thread.sleep(5000);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreationEmpty.java

@@ -55,7 +55,7 @@ public class TestFileCreationEmpty extends junit.framework.TestCase {
     conf.setInt("dfs.heartbeat.interval", 1);
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     try {
       cluster.waitActive();
       DistributedFileSystem dfs = (DistributedFileSystem)cluster.getFileSystem();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileStatus.java

@@ -71,7 +71,7 @@ public class TestFileStatus {
   public static void testSetUp() throws Exception {
     conf = new HdfsConfiguration();
     conf.setInt(DFSConfigKeys.DFS_LIST_LIMIT, 2);
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fc = FileContext.getFileContext(cluster.getURI(), conf);
     hftpfs = cluster.getHftpFileSystem();

+ 3 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -51,8 +51,9 @@ public class TestGetBlocks extends TestCase {
     final Random r = new Random();
     
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster(
-          CONF, REPLICATION_FACTOR, true, null );
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(CONF)
+                                               .numDataNodes(REPLICATION_FACTOR)
+                                               .build();
     try {
       cluster.waitActive();
       

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

@@ -30,7 +30,7 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   @Override
   protected void setUp() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    cluster = new MiniDFSCluster(conf, 2, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     fs = cluster.getFileSystem();
     defaultWorkingDirectory = "/user/" + 
            UserGroupInformation.getCurrentUser().getShortUserName();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestHDFSTrash.java

@@ -38,7 +38,7 @@ public class TestHDFSTrash extends TestTrash {
     TestSetup setup = new TestSetup(new TestSuite(TestHDFSTrash.class)) {
       protected void setUp() throws Exception {
         Configuration conf = new HdfsConfiguration();
-        cluster = new MiniDFSCluster(conf, 2, true, null);
+        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
       }
       protected void tearDown() throws Exception {
         if (cluster != null) { cluster.shutdown(); }

+ 3 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestHFlush.java

@@ -98,7 +98,8 @@ public class TestHFlush {
     final int SECTIONS = 10;
 
     fileContent = AppendTestUtil.initBuffer(AppendTestUtil.FILE_SIZE);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, replicas, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(replicas).build();
     // Make sure we work with DFS in order to utilize all its functionality
     DistributedFileSystem fileSystem =
         (DistributedFileSystem)cluster.getFileSystem();
@@ -168,7 +169,7 @@ public class TestHFlush {
     final Path p = new Path("/pipelineHeartbeat/foo");
     System.out.println("p=" + p);
     
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, DATANODE_NUM, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(DATANODE_NUM).build();
     DistributedFileSystem fs = (DistributedFileSystem)cluster.getFileSystem();
 
     byte[] fileContents = AppendTestUtil.initBuffer(fileLen);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestHftpFileSystem.java

@@ -68,7 +68,7 @@ public class TestHftpFileSystem extends TestCase {
     config = new Configuration();
     config.set(DFSConfigKeys.DFS_DATANODE_HOST_NAME_KEY, "localhost");
 
-    cluster = new MiniDFSCluster(config, 2, true, null);
+    cluster = new MiniDFSCluster.Builder(config).numDataNodes(2).build();
     hdfs = cluster.getFileSystem();
     final String hftpuri = 
       "hftp://" + config.get(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY);

+ 5 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -138,8 +138,7 @@ public class TestInjectionForSimulatedStorage extends TestCase {
       conf.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, checksumSize);
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
       //first time format
-      cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
-                                   true, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                             cluster.getNameNodePort()),
@@ -169,8 +168,10 @@ public class TestInjectionForSimulatedStorage extends TestCase {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
       conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
       
-      cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
-                                   true, null, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(numDataNodes * 2)
+                                  .format(false)
+                                  .build();
       cluster.waitActive();
       Set<Block> uniqueBlocks = new HashSet<Block>();
       for (int i=0; i<blocksList.length; ++i) {

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLargeBlock.java

@@ -183,7 +183,7 @@ public class TestLargeBlock extends junit.framework.TestCase {
     if (simulatedStorage) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = cluster.getFileSystem();
     try {
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLease.java

@@ -32,7 +32,7 @@ public class TestLease extends junit.framework.TestCase {
 
   public void testLease() throws Exception {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     try {
       FileSystem fs = cluster.getFileSystem();
       assertTrue(fs.mkdirs(dir));

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -72,7 +72,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
     MiniDFSCluster cluster = null;
 
     try {
-      cluster = new MiniDFSCluster(conf, 5, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
       cluster.waitActive();
 
       //create a file

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery2.java

@@ -72,7 +72,7 @@ public class TestLeaseRecovery2 {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
     conf.setInt("dfs.heartbeat.interval", 1);
 
-    cluster = new MiniDFSCluster(conf, 5, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(5).build();
     cluster.waitActive();
     dfs = (DistributedFileSystem)cluster.getFileSystem();
   }

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInDFS.java

@@ -39,7 +39,7 @@ public class TestListFilesInDFS extends TestListFiles {
 
   @BeforeClass
   public static void testSetUp() throws Exception {
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     fs = cluster.getFileSystem();
     fs.delete(TEST_DIR, true);
   }

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestListFilesInFileContext.java

@@ -63,7 +63,7 @@ public class TestListFilesInFileContext {
 
   @BeforeClass
   public static void testSetUp() throws Exception {
-    cluster = new MiniDFSCluster(conf, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).build();
     fc = FileContext.getFileContext(cluster.getConfiguration());
     fc.delete(TEST_DIR, true);
   }

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestListPathServlet.java

@@ -55,7 +55,7 @@ public class TestListPathServlet {
   @BeforeClass
   public static void setup() throws Exception {
     // start a cluster with single datanode
-    cluster = new MiniDFSCluster(CONF, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(CONF).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java

@@ -61,7 +61,7 @@ public class TestLocalDFS extends TestCase {
    */
   public void testWorkingDirectory() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path orig_path = fileSys.getWorkingDirectory();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestMissingBlocksAlert.java

@@ -52,7 +52,7 @@ public class TestMissingBlocksAlert extends TestCase {
       int fileLen = 10*1024;
 
       //start a cluster with single datanode
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
 
       DistributedFileSystem dfs = 

+ 2 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java

@@ -76,7 +76,8 @@ public class TestModTime extends TestCase {
   public void testModTime() throws IOException {
     Configuration conf = new HdfsConfiguration();
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes).build();
     cluster.waitActive();
     InetSocketAddress addr = new InetSocketAddress("localhost", 
                                                    cluster.getNameNodePort());

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestPipelines.java

@@ -61,7 +61,7 @@ public class TestPipelines {
 
   @Before
   public void startUpCluster() throws IOException {
-    cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestPread.java

@@ -210,7 +210,7 @@ public class TestPread extends TestCase {
     if (disableTransferTo) {
       conf.setBoolean("dfs.datanode.transferTo.allowed", false);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("preadtest.dat");

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestQuota.java

@@ -62,7 +62,7 @@ public class TestQuota extends TestCase {
     // Space quotas
     conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
     conf.setBoolean("dfs.support.append", true);
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                 fs instanceof DistributedFileSystem);
@@ -283,7 +283,7 @@ public class TestQuota extends TestCase {
    *  mkdirs, rename, and delete */
   public void testNamespaceCommands() throws Exception {
     final Configuration conf = new HdfsConfiguration();
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                 fs instanceof DistributedFileSystem);
@@ -455,7 +455,7 @@ public class TestQuota extends TestCase {
     // diskspace quotas
     conf.set(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, "512");
     conf.setBoolean("dfs.support.append", true);
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     final FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(),
                 fs instanceof DistributedFileSystem);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestReadWhileWriting.java

@@ -59,7 +59,7 @@ public class TestReadWhileWriting {
     conf.setLong("dfs.heartbeat.interval", 1);
 
     // create cluster
-    final MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    final MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     try {
       //change the lease limits.
       cluster.setLeasePeriod(SOFT_LEASE_LIMIT, HARD_LEASE_LIMIT);

+ 28 - 20
src/test/hdfs/org/apache/hadoop/hdfs/TestRenameWhileOpen.java

@@ -57,7 +57,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
 
     // create cluster
     System.out.println("Test 1*****************************");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = null;
     try {
       cluster.waitActive();
@@ -102,16 +102,18 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
       // This ensures that leases are persisted in fsimage.
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
       // persistent leases from fsimage.
       cluster.shutdown();
       try {Thread.sleep(5000);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
 
@@ -141,7 +143,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     System.out.println("Test 2************************************");
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = null;
     try {
       cluster.waitActive();
@@ -174,16 +176,18 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
       // This ensures that leases are persisted in fsimage.
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
       // persistent leases from fsimage.
       cluster.shutdown();
       try {Thread.sleep(5000);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
 
@@ -214,7 +218,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     System.out.println("Test 3************************************");
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = null;
     try {
       cluster.waitActive();
@@ -239,16 +243,18 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
       // This ensures that leases are persisted in fsimage.
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
       // persistent leases from fsimage.
       cluster.shutdown();
       try {Thread.sleep(5000);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
 
@@ -277,7 +283,7 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
     System.out.println("Test 4************************************");
 
     // create cluster
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fs = null;
     try {
       cluster.waitActive();
@@ -301,16 +307,18 @@ public class TestRenameWhileOpen extends junit.framework.TestCase {
       // This ensures that leases are persisted in fsimage.
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
 
       // restart cluster yet again. This triggers the code to read in
       // persistent leases from fsimage.
       cluster.shutdown();
       try {Thread.sleep(5000);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true, 
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fs = cluster.getFileSystem();
 

+ 10 - 7
src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java

@@ -152,7 +152,7 @@ public class TestReplication extends TestCase {
     DFSClient dfsClient = null;
     LocatedBlocks blocks = null;
     int replicaCount = 0;
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     fs = cluster.getFileSystem();
     dfsClient = new DFSClient(new InetSocketAddress("localhost",
@@ -197,7 +197,9 @@ public class TestReplication extends TestCase {
     if (simulated) {
       conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
     }
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, racks);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .racks(racks).build();
     cluster.waitActive();
     
     InetSocketAddress addr = new InetSocketAddress("localhost",
@@ -311,8 +313,7 @@ public class TestReplication extends TestCase {
       Configuration conf = new HdfsConfiguration();
       conf.set("dfs.replication", Integer.toString(numDataNodes));
       //first time format
-      cluster = new MiniDFSCluster(0, conf, numDataNodes, true,
-                                   true, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDataNodes).build();
       cluster.waitActive();
       DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                             cluster.getNameNodePort()),
@@ -380,8 +381,10 @@ public class TestReplication extends TestCase {
       conf.set("dfs.datanode.block.write.timeout.sec", Integer.toString(5));
       conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.75f"); // only 3 copies exist
       
-      cluster = new MiniDFSCluster(0, conf, numDataNodes*2, false,
-                                   true, null, null);
+      cluster = new MiniDFSCluster.Builder(conf)
+                                  .numDataNodes(numDataNodes * 2)
+                                  .format(false)
+                                  .build();
       cluster.waitActive();
       
       dfsClient = new DFSClient(new InetSocketAddress("localhost",
@@ -402,7 +405,7 @@ public class TestReplication extends TestCase {
    * @throws Exception
    */
   public void testReplicateLenMismatchedBlock() throws Exception {
-    MiniDFSCluster cluster = new MiniDFSCluster(new HdfsConfiguration(), 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(new HdfsConfiguration()).numDataNodes(2).build();
     try {
       cluster.waitActive();
       // test truncated block

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/TestRestartDFS.java

@@ -46,7 +46,7 @@ public class TestRestartDFS extends TestCase {
         conf.set(DFSConfigKeys.DFS_NAMENODE_SERVICE_RPC_ADDRESS_KEY,
                  "localhost:0");
       }
-      cluster = new MiniDFSCluster(conf, 4, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
       FileSystem fs = cluster.getFileSystem();
       files.createFiles(fs, dir);
 
@@ -65,7 +65,7 @@ public class TestRestartDFS extends TestCase {
                  "localhost:0");
       }
       // Here we restart the MiniDFSCluster without formatting namenode
-      cluster = new MiniDFSCluster(conf, 4, false, null); 
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build(); 
       FileSystem fs = cluster.getFileSystem();
       assertTrue("Filesystem corrupted after restart.",
                  files.checkFiles(fs, dir));
@@ -89,7 +89,7 @@ public class TestRestartDFS extends TestCase {
       }
       // This is a second restart to check that after the first restart
       // the image written in parallel to both places did not get corrupted
-      cluster = new MiniDFSCluster(conf, 4, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
       FileSystem fs = cluster.getFileSystem();
       assertTrue("Filesystem corrupted after restart.",
                  files.checkFiles(fs, dir));
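
TestRestartDFS above is the distilled form of the restart recipe: rebuild with format(false) and the previous on-disk state is picked up unchanged. A short sketch of that round trip (the exists() check stands in for the test's file-content assertions):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class RestartKeepsDataExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).build();
        Path p = new Path("/survives-restart");
        cluster.waitActive();
        cluster.getFileSystem().create(p).close();
        cluster.shutdown();

        // Restart on the existing storage: do NOT reformat.
        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(4).format(false).build();
        try {
          cluster.waitActive();
          FileSystem fs = cluster.getFileSystem();
          if (!fs.exists(p)) {
            throw new AssertionError("Filesystem corrupted after restart.");
          }
        } finally {
          cluster.shutdown();
        }
      }
    }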

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestSafeMode.java

@@ -58,7 +58,7 @@ public class TestSafeMode extends TestCase {
       Configuration conf = new HdfsConfiguration();
       // disable safemode extension to make the test run faster.
       conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_EXTENSION_KEY, "1");
-      cluster = new MiniDFSCluster(conf, 1, true, null);
+      cluster = new MiniDFSCluster.Builder(conf).build();
       cluster.waitActive();
       
       fs = (DistributedFileSystem)cluster.getFileSystem();
@@ -74,7 +74,7 @@ public class TestSafeMode extends TestCase {
       cluster.shutdown();
       
       // now bring up just the NameNode.
-      cluster = new MiniDFSCluster(conf, 0, false, null);
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false).build();
       cluster.waitActive();
       fs = (DistributedFileSystem)cluster.getFileSystem();
       

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestSeekBug.java

@@ -124,7 +124,7 @@ public class TestSeekBug extends TestCase {
    */
   public void testSeekBugDFS() throws IOException {
     Configuration conf = new HdfsConfiguration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("seektest.dat");

+ 9 - 4
src/test/hdfs/org/apache/hadoop/hdfs/TestSetTimes.java

@@ -84,7 +84,9 @@ public class TestSetTimes extends TestCase {
     conf.setInt("dfs.heartbeat.interval", 1);
 
 
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .build();
     cluster.waitActive();
     final int nnport = cluster.getNameNodePort();
     InetSocketAddress addr = new InetSocketAddress("localhost", 
@@ -159,8 +161,9 @@ public class TestSetTimes extends TestCase {
       // shutdown cluster and restart
       cluster.shutdown();
       try {Thread.sleep(2*MAX_IDLE_TIME);} catch (InterruptedException e) {}
-      cluster = new MiniDFSCluster(nnport, conf, 1, false, true,
-                                   null, null, null);
+      cluster = new MiniDFSCluster.Builder(conf).nameNodePort(nnport)
+                                                .format(false)
+                                                .build();
       cluster.waitActive();
       fileSys = cluster.getFileSystem();
 
@@ -196,7 +199,9 @@ public class TestSetTimes extends TestCase {
     conf.setInt(DFSConfigKeys.DFS_NAMENODE_HEARTBEAT_RECHECK_INTERVAL_KEY, 1000);
     conf.setInt("dfs.heartbeat.interval", 1);
     conf.setInt("dfs.datanode.handler.count", 50);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, numDatanodes, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
+                                               .numDataNodes(numDatanodes)
+                                               .build();
     cluster.waitActive();
     InetSocketAddress addr = new InetSocketAddress("localhost",
                                                      cluster.getNameNodePort());

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestSetrepIncreasing.java

@@ -33,7 +33,7 @@ public class TestSetrepIncreasing extends TestCase {
     conf.set("dfs.replication", "" + fromREP);
     conf.setLong("dfs.blockreport.intervalMsec", 1000L);
     conf.set(DFSConfigKeys.DFS_NAMENODE_REPLICATION_PENDING_TIMEOUT_SEC_KEY, Integer.toString(2));
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 10, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(10).build();
     FileSystem fs = cluster.getFileSystem();
     assertTrue("Not a HDFS: "+fs.getUri(), fs instanceof DistributedFileSystem);
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestSmallBlock.java

@@ -95,7 +95,7 @@ public class TestSmallBlock extends TestCase {
       conf.setBoolean("dfs.datanode.simulateddatastorage", true);
     }
     conf.set(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, "1");
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     FileSystem fileSys = cluster.getFileSystem();
     try {
       Path file1 = new Path("smallblocktest.dat");

+ 7 - 1
src/test/hdfs/org/apache/hadoop/hdfs/UpgradeUtilities.java

@@ -104,7 +104,13 @@ public class UpgradeUtilities {
       
       // format and start NameNode and start DataNode
       GenericTestUtils.formatNamenode(config);
-      cluster = new MiniDFSCluster(config, 1, StartupOption.REGULAR);
+      cluster = new MiniDFSCluster.Builder(config)
+                                  .numDataNodes(1)
+                                  .startupOption(StartupOption.REGULAR)
+                                  .format(false)
+                                  .manageDataDfsDirs(false)
+                                  .manageNameDfsDirs(false)
+                                  .build();
         
       NameNode namenode = cluster.getNameNode();
       namenodeStorageNamespaceID = namenode.versionRequest().getNamespaceID();
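
As with the createCluster() helper in TestDFSUpgrade earlier in this patch, turning off manageNameDfsDirs and manageDataDfsDirs means the cluster should use whatever name and data directories the supplied Configuration already names, rather than provisioning fresh ones under the test build directory. A sketch of that usage; the paths and configuration keys below are illustrative, not taken from this patch:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.HdfsConfiguration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class PreparedDirsExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        // Storage prepared outside the cluster (illustrative paths).
        conf.set("dfs.name.dir", "/tmp/hdfs-test/name1,/tmp/hdfs-test/name2");
        conf.set("dfs.data.dir", "/tmp/hdfs-test/data1,/tmp/hdfs-test/data2");
        MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
                                                   .numDataNodes(1)
                                                   .format(false)            // keep the prepared image
                                                   .manageNameDfsDirs(false) // honor dfs.name.dir as-is
                                                   .manageDataDfsDirs(false) // honor dfs.data.dir as-is
                                                   .build();
        try {
          cluster.waitActive();
        } finally {
          cluster.shutdown();
        }
      }
    }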

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationToken.java

@@ -59,7 +59,7 @@ public class TestDelegationToken {
     config.set("hadoop.security.auth_to_local",
         "RULE:[2:$1@$0](JobTracker@.*FOO.COM)s/@.*//" + "DEFAULT");
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster(0, config, 1, true, true, true,  null, null, null, null);
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
     cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
   }

+ 1 - 2
src/test/hdfs/org/apache/hadoop/hdfs/security/TestDelegationTokenForProxyUser.java

@@ -96,8 +96,7 @@ public class TestDelegationTokenForProxyUser {
         "group1");
     configureSuperUserIPAddresses(config, REAL_USER);
     FileSystem.setDefaultUri(config, "hdfs://localhost:" + "0");
-    cluster = new MiniDFSCluster(0, config, 1, true, true, true, null, null,
-        null, null);
+    cluster = new MiniDFSCluster.Builder(config).build();
     cluster.waitActive();
     cluster.getNamesystem().getDelegationTokenSecretManager().startThreads();
     ProxyUsers.refreshSuperUserGroupsConfiguration(config);

+ 16 - 7
src/test/hdfs/org/apache/hadoop/hdfs/server/balancer/TestBalancer.java

@@ -85,7 +85,7 @@ public class TestBalancer extends TestCase {
    */
   private ExtendedBlock[] generateBlocks(Configuration conf, long size,
       short numNodes) throws IOException {
-    cluster = new MiniDFSCluster( conf, numNodes, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numNodes).build();
     try {
       cluster.waitActive();
       client = DFSClient.createNamenode(conf);
@@ -171,8 +171,11 @@ public class TestBalancer extends TestCase {
 
     // restart the cluster: do NOT format the cluster
     conf.set(DFSConfigKeys.DFS_NAMENODE_SAFEMODE_THRESHOLD_PCT_KEY, "0.0f"); 
-    cluster = new MiniDFSCluster(0, conf, numDatanodes,
-        false, true, null, racks, capacities);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDatanodes)
+                                              .format(false)
+                                              .racks(racks)
+                                              .simulatedCapacities(capacities)
+                                              .build();
     cluster.waitActive();
     client = DFSClient.createNamenode(conf);
 
@@ -209,8 +212,11 @@ public class TestBalancer extends TestCase {
       long newCapacity, String newRack) throws Exception {
     int numOfDatanodes = capacities.length;
     assertEquals(numOfDatanodes, racks.length);
-    cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null, 
-        racks, capacities);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(capacities.length)
+                                .racks(racks)
+                                .simulatedCapacities(capacities)
+                                .build();
     try {
       cluster.waitActive();
       client = DFSClient.createNamenode(conf);
@@ -348,8 +354,11 @@ public class TestBalancer extends TestCase {
       throws Exception {
     int numOfDatanodes = capacities.length;
     assertEquals(numOfDatanodes, racks.length);
-    cluster = new MiniDFSCluster(0, conf, capacities.length, true, true, null,
-        racks, capacities);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(capacities.length)
+                                .racks(racks)
+                                .simulatedCapacities(capacities)
+                                .build();
     try {
       cluster.waitActive();
       client = DFSClient.createNamenode(conf);
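
Put together, the rack and capacity plumbing above reduces to a short idiom. A usage sketch with invented values; the methods are the ones in these hunks, and as the asserts above require, the racks and capacities arrays must be the same length:

  // Invented values for illustration; conf is an in-scope Configuration.
  String[] racks = {"/rack0", "/rack0", "/rack1"};
  long[] capacities = {500L << 20, 500L << 20, 500L << 20}; // 500MB each
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(capacities.length)     // one datanode per capacity entry
      .racks(racks)                        // same length as capacities
      .simulatedCapacities(capacities)     // per-DN simulated, in-memory storage
      .build();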

+ 23 - 11
src/test/hdfs/org/apache/hadoop/hdfs/server/common/TestDistributedUpgrade.java

@@ -58,12 +58,14 @@ public class TestDistributedUpgrade extends TestCase {
    */
   void startNameNodeShouldFail(StartupOption operation) {
     try {
-      //cluster = new MiniDFSCluster(conf, 0, operation); // should fail
+      //cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).startupOption(operation).build(); // should fail
       // we set manage dirs to true as NN has to start from untar'ed image with 
       // nn dirs set to name1 and name2
-      cluster = new MiniDFSCluster(0, conf, 0, false, true,
-          operation, null); // Should fail
-      throw new AssertionError("NameNode should have failed to start");
+      cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                                .format(false)
+                                                .startupOption(operation)
+                                                .build(); // should fail
+      throw new AssertionError("NameNode should have failed to start");
     } catch (Exception expected) {
       expected = null;
       // expected
@@ -111,9 +113,12 @@ public class TestDistributedUpgrade extends TestCase {
     startNameNodeShouldFail(StartupOption.REGULAR);
 
     log("Start NameNode only distributed upgrade", numDirs);
-    // cluster = new MiniDFSCluster(conf, 0, StartupOption.UPGRADE);
-    cluster = new MiniDFSCluster(0, conf, 0, false, true,
-                                  StartupOption.UPGRADE, null);
+    // cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0).format(false)
+    // .startupOption(StartupOption.UPGRADE).build();
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(0)
+                                              .format(false)
+                                              .startupOption(StartupOption.UPGRADE)
+                                              .build();
     cluster.shutdown();
 
     log("NameNode start in regular mode when dustributed upgrade has been started", numDirs);
@@ -123,8 +128,11 @@ public class TestDistributedUpgrade extends TestCase {
     startNameNodeShouldFail(StartupOption.ROLLBACK);
 
     log("Normal distributed upgrade for the cluster", numDirs);
-    cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
-                                  StartupOption.UPGRADE, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(numDNs)
+                                .format(false)
+                                .startupOption(StartupOption.UPGRADE)
+                                .build();
     DFSAdmin dfsAdmin = new DFSAdmin();
     dfsAdmin.setConf(conf);
     dfsAdmin.run(new String[] {"-safemode", "wait"});
@@ -132,8 +140,12 @@ public class TestDistributedUpgrade extends TestCase {
 
     // it should be ok to start in regular mode
     log("NameCluster regular startup after the upgrade", numDirs);
-    cluster = new MiniDFSCluster(0, conf, numDNs, false, true,
-                                  StartupOption.REGULAR, null);
+    cluster = new MiniDFSCluster.Builder(conf)
+                                .numDataNodes(numDNs)
+                                .format(false)
+                                .startupOption(StartupOption.REGULAR)
+                                .build();
+
     cluster.waitActive();
     cluster.shutdown();
   }
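
Condensed, the phases above read as one restart recipe: keep the existing image with format(false) and pick the startup mode per phase. A sketch using only methods already shown in this diff:

  // Upgrade pass over the existing storage, then a regular restart to
  // confirm the upgrade took; numDNs as in the test above.
  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDNs)
      .format(false)                          // reuse the untarred image
      .startupOption(StartupOption.UPGRADE)
      .build();
  cluster.shutdown();

  cluster = new MiniDFSCluster.Builder(conf)
      .numDataNodes(numDNs)
      .format(false)
      .startupOption(StartupOption.REGULAR)
      .build();
  cluster.waitActive();
  cluster.shutdown();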

+ 3 - 2
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -93,8 +93,9 @@ public class TestBlockReplacement extends TestCase {
     CONF.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, DEFAULT_BLOCK_SIZE);
     CONF.setInt(DFSConfigKeys.DFS_BYTES_PER_CHECKSUM_KEY, DEFAULT_BLOCK_SIZE/2);
     CONF.setLong("dfs.blockreport.intervalMsec",500);
-    cluster = new MiniDFSCluster(
-          CONF, REPLICATION_FACTOR, true, INITIAL_RACKS );
+    cluster = new MiniDFSCluster.Builder(CONF).numDataNodes(REPLICATION_FACTOR)
+                                              .racks(INITIAL_RACKS).build();
+
     try {
       cluster.waitActive();
       

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReport.java

@@ -84,7 +84,7 @@ public class TestBlockReport {
   @Before
   public void startUpCluster() throws IOException {
    REPL_FACTOR = 1; // Reset in case a test has modified the value
-    cluster = new MiniDFSCluster(conf, REPL_FACTOR, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(REPL_FACTOR).build();
     fs = (DistributedFileSystem) cluster.getFileSystem();
   }
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMXBean.java

@@ -35,7 +35,7 @@ public class TestDataNodeMXBean {
   @Test
   public void testDataNodeMXBean() throws Exception {
     Configuration conf = new Configuration();
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
 
     try {
       List<DataNode> datanodes = cluster.getDataNodes();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeMetrics.java

@@ -33,7 +33,7 @@ public class TestDataNodeMetrics extends TestCase {
   public void testDataNodeMetrics() throws Exception {
     Configuration conf = new HdfsConfiguration();
     conf.setBoolean(SimulatedFSDataset.CONFIG_PROPERTY_SIMULATED, true);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     try {
       FileSystem fs = cluster.getFileSystem();
       final long LONG_FILE_LEN = Integer.MAX_VALUE+1L; 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDataNodeVolumeFailure.java

@@ -73,7 +73,7 @@ public class TestDataNodeVolumeFailure extends TestCase{
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, block_size);
     // Allow a single volume failure (there are two volumes)
     conf.setInt(DFSConfigKeys.DFS_DATANODE_FAILED_VOLUMES_TOLERATED_KEY, 1);
-    cluster = new MiniDFSCluster(conf, dn_num, true, null);
+    cluster = new MiniDFSCluster.Builder(conf).numDataNodes(dn_num).build();
     cluster.waitActive();
   }
   

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDatanodeRestart.java

@@ -50,7 +50,7 @@ public class TestDatanodeRestart {
     Configuration conf = new HdfsConfiguration();
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
     cluster.waitActive();
     FileSystem fs = cluster.getFileSystem();
     try {
@@ -74,7 +74,7 @@ public class TestDatanodeRestart {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     conf.setBoolean("dfs.support.append", true);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(2).build();
     cluster.waitActive();
     try {
       testRbwReplicas(cluster, false);
@@ -137,7 +137,7 @@ public class TestDatanodeRestart {
     conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, 1024L);
     conf.setInt(DFSConfigKeys.DFS_CLIENT_WRITE_PACKET_SIZE_KEY, 512);
     conf.setBoolean("dfs.support.append", true);
-    MiniDFSCluster cluster = new MiniDFSCluster(conf, 1, true, null);
+    MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
     cluster.waitActive();
     try {
       FileSystem fs = cluster.getFileSystem();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestDirectoryScanner.java

@@ -212,7 +212,7 @@ public class TestDirectoryScanner extends TestCase {
   }
   
   public void runTest(int parallelism) throws Exception {
-    cluster = new MiniDFSCluster(CONF, 1, true, null);
+    cluster = new MiniDFSCluster.Builder(CONF).build();
     try {
       cluster.waitActive();
       fds = (FSDataset) cluster.getDataNodes().get(0).getFSDataset();
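
Across these tests the call sites settle into a single lifecycle idiom. A minimal sketch, assuming only methods that already appear in this diff:

  MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).build();
  try {
    cluster.waitActive();                   // block until NN and DNs are up
    FileSystem fs = cluster.getFileSystem();
    // ... exercise HDFS through fs ...
  } finally {
    cluster.shutdown();                     // tear down even if the test fails
  }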

Some files were not shown because too many files changed in this diff