
HADOOP-3910. Remove unused ClusterTestDFSNamespaceLogging and
ClusterTestDFS. Contributed by Tsz Wo (Nicholas), SZE.


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@691091 13f79535-47bb-0310-9956-ffa450edef68

Christopher Douglas, 17 years ago
Parent
Current commit
20eae9761b

+ 3 - 0
CHANGES.txt

@@ -437,6 +437,9 @@ Trunk (unreleased changes)
     HADOOP-3950. Cause the Mini MR cluster to wait for task trackers to 
     register before continuing. (enis via omalley)
 
+    HADOOP-3910. Remove unused ClusterTestDFSNamespaceLogging and
+    ClusterTestDFS. (Tsz Wo (Nicholas), SZE via cdouglas)
+
 Release 0.18.1 - Unreleased
 
   BUG FIXES

+ 0 - 532
src/test/org/apache/hadoop/hdfs/ClusterTestDFS.java

@@ -1,532 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import junit.framework.TestCase;
-import junit.framework.AssertionFailedError;
-
-import org.apache.commons.logging.*;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FSInputStream;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.hdfs.protocol.Block;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-import org.apache.hadoop.io.UTF8;
-import org.apache.hadoop.conf.Configuration;
-
-
-import java.io.File;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.ListIterator;
-import java.util.Random;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-
-/**
- * Test DFS.
- * ClusterTestDFS is a JUnit test for DFS using "pseudo multiprocessing" (or
- * more strictly, pseudo distributed) meaning all daemons run in one process
- * and sockets are used to communicate between daemons.  The test permutes
- * various block sizes, number of files, file sizes, and number of
- * datanodes.  After creating 1 or more files and filling them with random
- * data, one datanode is shut down, and then the files are verified.
- * Next, all the random test files are deleted and we test for leakage
- * (non-deletion) by directly checking the real directories corresponding
- * to the datanodes still running.
- * <p>
- * Usage notes: TEST_PERMUTATION_MAX can be adjusted to perform more or
- * less testing of permutations.  The ceiling of useful permutation is
- * TEST_PERMUTATION_MAX_CEILING.
- * <p>
- * DFSClient emits many messages that can be ignored like:
- * "Failed to connect to *:7000:java.net.ConnectException: Connection refused: connect"
- * because a datanode is forced to close during testing.
- * <p>
- * Warnings about "Zero targets found" can be ignored (these are naggingly
- * emitted even though it is not possible to achieve the desired replication
- * level with the number of active datanodes.)
- * <p>
- * Possible Extensions:
- * <p>Bring a datanode down and restart it to verify reconnection to namenode.
- * <p>Simulate running out of disk space on one datanode only.
- * <p>Bring the namenode down and restart it to verify that datanodes reconnect.
- * <p>
- * <p>For a another approach to filesystem testing, see the high level
- * (HadoopFS level) test {@link org.apache.hadoop.fs.TestFileSystem}.
- */
-public class ClusterTestDFS extends TestCase implements FSConstants {
-  private static final Log LOG = LogFactory.getLog(ClusterTestDFS.class);
-
-  private static Configuration conf = new Configuration();
-  private static int BUFFER_SIZE =
-    conf.getInt("io.file.buffer.size", 4096);
-
-  private static int testCycleNumber = 0;
-
-  /**
-   * all DFS test files go under this base directory
-   */
-  private static String baseDirSpecified;
-
-  /**
-   * base dir as File
-   */
-  private static File baseDir;
-
-  /** DFS block sizes to permute over in multiple test cycles
-   * (array length should be prime).
-   */
-  private static final int[] BLOCK_SIZES = {100000, 4096};
-
-  /** DFS file sizes to permute over in multiple test cycles
-   * (array length should be prime).
-   */
-  private static final int[] FILE_SIZES =
-  {100000, 100001, 4095, 4096, 4097, 1000000, 1000001};
-
-  /** DFS file counts to permute over in multiple test cycles
-   * (array length should be prime).
-   */
-  private static final int[] FILE_COUNTS = {1, 10, 100};
-
-  /** Number of useful permutations or test cycles.
-   * (The 2 factor represents the alternating 2 or 3 number of datanodes
-   * started.)
-   */
-  private static final int TEST_PERMUTATION_MAX_CEILING =
-    BLOCK_SIZES.length * FILE_SIZES.length * FILE_COUNTS.length * 2;
-
-  /** Number of permutations of DFS test parameters to perform.
-   * If this is greater than ceiling TEST_PERMUTATION_MAX_CEILING, then the
-   * ceiling value is used.
-   */
-  private static final int TEST_PERMUTATION_MAX = 3;
-  private Constructor randomDataGeneratorCtor = null;
-
-  static {
-    baseDirSpecified = System.getProperty("test.dfs.data", "/tmp/dfs_test");
-    baseDir = new File(baseDirSpecified);
-  }
-
-  protected void setUp() throws Exception {
-    super.setUp();
-    conf.setBoolean("test.dfs.same.host.targets.allowed", true);
-  }
-
-  /**
-   * Remove old files from temp area used by this test case and be sure
-   * base temp directory can be created.
-   */
-  protected void prepareTempFileSpace() {
-    if (baseDir.exists()) {
-      try { // start from a blank slate
-        FileUtil.fullyDelete(baseDir);
-      } catch (Exception ignored) {
-      }
-    }
-    baseDir.mkdirs();
-    if (!baseDir.isDirectory()) {
-      throw new RuntimeException("Value of root directory property test.dfs.data for dfs test is not a directory: "
-                                 + baseDirSpecified);
-    }
-  }
-
-  /**
-   * Pseudo Distributed FS Test.
-   * Test DFS by running all the necessary daemons in one process.
-   * Test various block sizes, number of files, disk space consumption,
-   * and leakage.
-   *
-   * @throws Exception
-   */
-  public void testFsPseudoDistributed()
-    throws Exception {
-    while (testCycleNumber < TEST_PERMUTATION_MAX &&
-           testCycleNumber < TEST_PERMUTATION_MAX_CEILING) {
-      int blockSize = BLOCK_SIZES[testCycleNumber % BLOCK_SIZES.length];
-      int numFiles = FILE_COUNTS[testCycleNumber % FILE_COUNTS.length];
-      int fileSize = FILE_SIZES[testCycleNumber % FILE_SIZES.length];
-      prepareTempFileSpace();
-      testFsPseudoDistributed(fileSize, numFiles, blockSize,
-                              (testCycleNumber % 2) + 2);
-    }
-  }
-
-  /**
-   * Pseudo Distributed FS Testing.
-   * Do one test cycle with given parameters.
-   *
-   * @param nBytes         number of bytes to write to each file.
-   * @param numFiles       number of files to create.
-   * @param blockSize      block size to use for this test cycle.
-   * @param initialDNcount number of datanodes to create
-   * @throws Exception
-   */
-  public void testFsPseudoDistributed(long nBytes, int numFiles,
-                                      int blockSize, int initialDNcount)
-    throws Exception {
-    long startTime = System.currentTimeMillis();
-    int bufferSize = Math.min(BUFFER_SIZE, blockSize);
-    boolean checkDataDirsEmpty = false;
-    int iDatanodeClosed = 0;
-    Random randomDataGenerator = makeRandomDataGenerator();
-    final int currentTestCycleNumber = testCycleNumber;
-    msg("using randomDataGenerator=" + randomDataGenerator.getClass().getName());
-
-    //
-    //     modify config for test
-
-    //
-    // set given config param to override other config settings
-    conf.setInt("test.dfs.block_size", blockSize);
-    // verify that config changed
-    assertTrue(blockSize == conf.getInt("test.dfs.block_size", 2)); // 2 is an intentional obviously-wrong block size
-    // downsize for testing (just to save resources)
-    conf.setInt("dfs.namenode.handler.count", 3);
-    if (false) { //  use MersenneTwister, if present
-      conf.set("hadoop.random.class",
-               "org.apache.hadoop.util.MersenneTwister");
-    }
-    conf.setLong("dfs.blockreport.intervalMsec", 50*1000L);
-    conf.setLong("dfs.datanode.startupMsec", 15*1000L);
-
-    String nameFSDir = baseDirSpecified + "/name";
-    msg("----Start Test Cycle=" + currentTestCycleNumber +
-        " test.dfs.block_size=" + blockSize +
-        " nBytes=" + nBytes +
-        " numFiles=" + numFiles +
-        " initialDNcount=" + initialDNcount);
-
-    //
-    //          start a NameNode
-
-    int nameNodePort = 9000 + testCycleNumber++; // ToDo: settable base port
-    String nameNodeSocketAddr = "localhost:" + nameNodePort;
-    conf.set("dfs.name.dir", nameFSDir);
-    NameNode nameNodeDaemon = new NameNode(nameNodeSocketAddr, conf);
-    DFSClient dfsClient = null;
-    try {
-      //
-      //        start some DataNodes
-      //
-      ArrayList<DataNode> listOfDataNodeDaemons = new ArrayList<DataNode>();
-      FileSystem.setDefaultUri(conf, "hdfs://"+nameNodeSocketAddr);
-      for (int i = 0; i < initialDNcount; i++) {
-        // uniquely config real fs path for data storage for this datanode
-        String dataDirs[] = new String[1];
-        dataDirs[0] = baseDirSpecified + "/datanode" + i;
-        conf.set("dfs.data.dir", dataDirs[0]);
-        DataNode dn = DataNode.makeInstance(dataDirs, conf);
-        if (dn != null) {
-          listOfDataNodeDaemons.add(dn);
-          (new Thread(dn, "DataNode" + i + ": " + dataDirs[0])).start();
-        }
-      }
-      try {
-        assertTrue("insufficient datanodes for test to continue",
-                   (listOfDataNodeDaemons.size() >= 2));
-
-        //
-        //          wait for datanodes to report in
-        awaitQuiescence();
-
-        //  act as if namenode is a remote process
-        dfsClient = new DFSClient(new InetSocketAddress("localhost", nameNodePort), conf);
-
-        //
-        //           write nBytes of data using randomDataGenerator to numFiles
-        //
-        ArrayList<UTF8> testfilesList = new ArrayList<UTF8>();
-        byte[] buffer = new byte[bufferSize];
-        UTF8 testFileName = null;
-        for (int iFileNumber = 0; iFileNumber < numFiles; iFileNumber++) {
-          testFileName = new UTF8("/f" + iFileNumber);
-          testfilesList.add(testFileName);
-          OutputStream nos = dfsClient.create(testFileName.toString(), false);
-          try {
-            for (long nBytesWritten = 0L;
-                 nBytesWritten < nBytes;
-                 nBytesWritten += buffer.length) {
-              if ((nBytesWritten + buffer.length) > nBytes) {
-                // calculate byte count needed to exactly hit nBytes in length
-                //  to keep randomDataGenerator in sync during the verify step
-                int pb = (int) (nBytes - nBytesWritten);
-                byte[] bufferPartial = new byte[pb];
-                randomDataGenerator.nextBytes(bufferPartial);
-                nos.write(bufferPartial);
-              } else {
-                randomDataGenerator.nextBytes(buffer);
-                nos.write(buffer);
-              }
-            }
-          } finally {
-            nos.flush();
-            nos.close();
-          }
-        }
-
-        //
-        // No need to wait for blocks to be replicated because replication
-        //  is supposed to be complete when the file is closed.
-        //
-
-        //
-        //                     take one datanode down
-        iDatanodeClosed =
-          currentTestCycleNumber % listOfDataNodeDaemons.size();
-        DataNode dn = (DataNode) listOfDataNodeDaemons.get(iDatanodeClosed);
-        msg("shutdown datanode daemon " + iDatanodeClosed +
-            " dn=" + dn.data);
-        try {
-          dn.shutdown();
-        } catch (Exception e) {
-          msg("ignoring datanode shutdown exception=" + e);
-        }
-
-        //
-        //          verify data against a "rewound" randomDataGenerator
-        //               that all of the data is intact
-        long lastLong = randomDataGenerator.nextLong();
-        randomDataGenerator = makeRandomDataGenerator(); // restart (make new) PRNG
-        ListIterator li = testfilesList.listIterator();
-        while (li.hasNext()) {
-          testFileName = (UTF8) li.next();
-          FSInputStream nis = dfsClient.open(testFileName.toString());
-          byte[] bufferGolden = new byte[bufferSize];
-          int m = 42;
-          try {
-            while (m != -1) {
-              m = nis.read(buffer);
-              if (m == buffer.length) {
-                randomDataGenerator.nextBytes(bufferGolden);
-                assertBytesEqual(buffer, bufferGolden, buffer.length);
-              } else if (m > 0) {
-                byte[] bufferGoldenPartial = new byte[m];
-                randomDataGenerator.nextBytes(bufferGoldenPartial);
-                assertBytesEqual(buffer, bufferGoldenPartial, bufferGoldenPartial.length);
-              }
-            }
-          } finally {
-            nis.close();
-          }
-        }
-        // verify last randomDataGenerator rand val to ensure last file length was checked
-        long lastLongAgain = randomDataGenerator.nextLong();
-        assertEquals(lastLong, lastLongAgain);
-        msg("Finished validating all file contents");
-
-        //
-        //                    now delete all the created files
-        msg("Delete all random test files under DFS via remaining datanodes");
-        li = testfilesList.listIterator();
-        while (li.hasNext()) {
-          testFileName = (UTF8) li.next();
-          assertTrue(dfsClient.delete(testFileName.toString(), true));
-        }
-
-        //
-        //                   wait for delete to be propagated
-        //                  (unlike writing files, delete is lazy)
-        msg("Test thread sleeping while datanodes propagate delete...");
-        awaitQuiescence();
-        msg("Test thread awakens to verify file contents");
-
-        //
-        //             check that the datanode's block directory is empty
-        //                (except for datanode that had forced shutdown)
-        checkDataDirsEmpty = true; // do it during finally clause
-
-      } catch (AssertionFailedError afe) {
-        throw afe;
-      } catch (Throwable t) {
-        msg("Unexpected exception_b: " + t);
-        t.printStackTrace();
-      } finally {
-        //
-        // shut down datanode daemons (this takes advantage of being same-process)
-        msg("begin shutdown of all datanode daemons for test cycle " +
-            currentTestCycleNumber);
-
-        for (int i = 0; i < listOfDataNodeDaemons.size(); i++) {
-          DataNode dataNode = (DataNode) listOfDataNodeDaemons.get(i);
-          if (i != iDatanodeClosed) {
-            try {
-              if (checkDataDirsEmpty) {
-                assertNoBlocks(dataNode);
-
-              }
-              dataNode.shutdown();
-            } catch (Exception e) {
-              msg("ignoring exception during (all) datanode shutdown, e=" + e);
-            }
-          }
-        }
-      }
-      msg("finished shutdown of all datanode daemons for test cycle " +
-          currentTestCycleNumber);
-      if (dfsClient != null) {
-        try {
-          msg("close down subthreads of DFSClient");
-          dfsClient.close();
-        } catch (Exception ignored) { }
-        msg("finished close down of DFSClient");
-      }
-    } catch (AssertionFailedError afe) {
-      throw afe;
-    } catch (Throwable t) {
-      msg("Unexpected exception_a: " + t);
-      t.printStackTrace();
-    } finally {
-      // shut down namenode daemon (this takes advantage of being same-process)
-      msg("begin shutdown of namenode daemon for test cycle " +
-          currentTestCycleNumber);
-      try {
-        nameNodeDaemon.stop();
-      } catch (Exception e) {
-        msg("ignoring namenode shutdown exception=" + e);
-      }
-      msg("finished shutdown of namenode daemon for test cycle " +
-          currentTestCycleNumber);
-    }
-    msg("test cycle " + currentTestCycleNumber + " elapsed time=" +
-        (System.currentTimeMillis() - startTime) / 1000. + "sec");
-    msg("threads still running (look for stragglers): ");
-    msg(summarizeThreadGroup());
-  }
-
-  private void assertNoBlocks(DataNode dn) {
-    Block[] blocks = dn.data.getBlockReport();
-    // if this fails, the delete did not propagate because either
-    //   awaitQuiescence() returned before the disk images were removed
-    //   or a real failure was detected.
-    assertTrue(" data dir not empty: " + dn.data,
-               blocks.length==0);
-  }
-
-  /**
-   * Make a data generator.
-   * Allows optional use of high quality PRNG by setting property
-   * hadoop.random.class to the full class path of a subclass of
-   * java.util.Random such as "...util.MersenneTwister".
-   * The property test.dfs.random.seed can supply a seed for reproducible
-   * testing (a default is set here if property is not set.)
-   */
-  private Random makeRandomDataGenerator() {
-    long seed = conf.getLong("test.dfs.random.seed", 0xB437EF);
-    try {
-      if (randomDataGeneratorCtor == null) {
-        // lazy init
-        String rndDataGenClassname =
-          conf.get("hadoop.random.class", "java.util.Random");
-        Class<?> clazz = Class.forName(rndDataGenClassname);
-        randomDataGeneratorCtor = clazz.getConstructor(Long.TYPE);
-      }
-
-      if (randomDataGeneratorCtor != null) {
-        Object arg[] = {new Long(seed)};
-        return (Random) randomDataGeneratorCtor.newInstance(arg);
-      }
-    } catch (ClassNotFoundException absorb) {
-    } catch (NoSuchMethodException absorb) {
-    } catch (SecurityException absorb) {
-    } catch (InstantiationException absorb) {
-    } catch (IllegalAccessException absorb) {
-    } catch (IllegalArgumentException absorb) {
-    } catch (InvocationTargetException absorb) {
-    }
-
-    // last resort
-    return new java.util.Random(seed);
-  }
-
-  /** Wait for the DFS datanodes to become quiescent.
-   * The initial implementation is to sleep for some fixed amount of time,
-   * but a better implementation would be to really detect when distributed
-   * operations are completed.
-   * @throws InterruptedException
-   */
-  private void awaitQuiescence() throws InterruptedException {
-    // ToDo: Need observer pattern, not static sleep
-    // Doug suggested that the block report interval could be made shorter
-    //   and then observing that would be a good way to know when an operation
-    //   was complete (quiescence detect).
-    sleepAtLeast(60000);
-  }
-
-  private void assertBytesEqual(byte[] buffer, byte[] bufferGolden, int len) {
-    for (int i = 0; i < len; i++) {
-      assertEquals(buffer[i], bufferGolden[i]);
-    }
-  }
-
-  private void msg(String s) {
-    //System.out.println(s);
-    LOG.info(s);
-  }
-
-  public static void sleepAtLeast(int tmsec) {
-    long t0 = System.currentTimeMillis();
-    long t1 = t0;
-    long tslept = t1 - t0;
-    while (tmsec > tslept) {
-      try {
-        long tsleep = tmsec - tslept;
-        Thread.sleep(tsleep);
-        t1 = System.currentTimeMillis();
-      }  catch (InterruptedException ie) {
-        t1 = System.currentTimeMillis();
-      }
-      tslept = t1 - t0;
-    }
-  }
-
-  public static String summarizeThreadGroup() {
-    int n = 10;
-    int k = 0;
-    Thread[] tarray = null;
-    StringBuffer sb = new StringBuffer(500);
-    do {
-      n = n * 10;
-      tarray = new Thread[n];
-      k = Thread.enumerate(tarray);
-    } while (k == n); // while array is too small...
-    for (int i = 0; i < k; i++) {
-      Thread thread = tarray[i];
-      sb.append(thread.toString());
-      sb.append("\n");
-    }
-    return sb.toString();
-  }
-
-  public static void main(String[] args) throws Exception {
-    String classname = ClusterTestDFS.class.getName();
-    String usage = "Usage: " + classname + " (no args)";
-    if (args.length != 0) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    String[] testargs = {classname};
-    junit.textui.TestRunner.main(testargs);
-  }
-
-}
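The scenario this deleted test exercised -- start an in-process cluster, fill files with reproducible random data, stop one datanode, verify the data, then delete the files and check for leakage -- is the coverage that MiniDFSCluster-based tests provide without hand-rolled NameNode/DataNode startup. A minimal sketch, not part of this commit, assuming this era's org.apache.hadoop.hdfs.MiniDFSCluster constructor (conf, datanode count, format flag, racks) and its stopDataNode(int) method:

import java.util.Random;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class PseudoDistributedReadAfterDatanodeLoss {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // three in-process datanodes, freshly formatted namespace
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 3, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();
      Path f = new Path("/f0");

      // write random data from a fixed seed so it can be regenerated later
      byte[] expected = new byte[100000];
      new Random(0xB437EFL).nextBytes(expected);
      FSDataOutputStream out = fs.create(f);
      out.write(expected);
      out.close();

      // take one datanode down, then verify the file byte-for-byte
      cluster.stopDataNode(0);
      byte[] actual = new byte[expected.length];
      FSDataInputStream in = fs.open(f);
      in.readFully(actual);
      in.close();
      for (int i = 0; i < expected.length; i++) {
        if (expected[i] != actual[i]) {
          throw new AssertionError("data mismatch at byte " + i);
        }
      }

      // delete and confirm the namespace entry is really gone
      fs.delete(f, true);
      if (fs.exists(f)) {
        throw new AssertionError("file not deleted");
      }
    } finally {
      cluster.shutdown();
    }
  }
}

The fixed seed plays the same role as the "rewound" randomDataGenerator in the removed test: the expected bytes can always be rebuilt for verification.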

+ 0 - 467
src/test/org/apache/hadoop/hdfs/ClusterTestDFSNamespaceLogging.java

@@ -1,467 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.hdfs;
-
-import junit.framework.TestCase;
-import junit.framework.AssertionFailedError;
-
-import org.apache.commons.logging.*;
-
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.FileUtil;
-import org.apache.hadoop.io.UTF8;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.hdfs.protocol.FSConstants;
-import org.apache.hadoop.hdfs.server.datanode.DataNode;
-import org.apache.hadoop.hdfs.server.namenode.NameNode;
-
-import java.io.BufferedReader;
-import java.io.File;
-import java.io.FileNotFoundException;
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.OutputStream;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-
-/**
- * Test DFS logging
- * make sure that any namespace mutations are logged.
- */
-public class ClusterTestDFSNamespaceLogging extends TestCase implements FSConstants {
-  private static final Log LOG = LogFactory.getLog(ClusterTestDFSNamespaceLogging.class);
-
-  private static Configuration conf = new Configuration();
-
-  /**
-   * all DFS test files go under this base directory
-   */
-  private static String baseDirSpecified = conf.get("test.dfs.data", "/tmp/test-dfs");
-
-  /**
-   * base dir as File
-   */
-  private static File baseDir=new File(baseDirSpecified);
-  
-  /**
-   * name node port
-   */
-  int nameNodePort = conf.getInt("dfs.namenode.port", 9020);
-  
-  /** DFS client, datanodes, and namenode
-   */
-  DFSClient dfsClient;
-  ArrayList<DataNode> dataNodeDaemons = new ArrayList<DataNode>();
-  NameNode nameNodeDaemon;
-  
-  /** Log header length
-   */
-  private static final int DIR_LOG_HEADER_LEN = 30;
-  private static final int BLOCK_LOG_HEADER_LEN = 32;
-  /** DFS block size
-   */
-  private static final int BLOCK_SIZE = 32*1024*1024;
-  
-  /** Buffer size
-   */
-  private static final int BUFFER_SIZE = 4096;
-
-  private BufferedReader logfh;
-  private String logFile;
-  
-  protected void setUp() throws Exception {
-    super.setUp();
-    conf.setBoolean("test.dfs.same.host.targets.allowed", true);
-  }
-
-  /**
-   * Remove old files from temp area used by this test case and be sure
-   * base temp directory can be created.
-   */
-  protected void prepareTempFileSpace() {
-    if (baseDir.exists()) {
-      try { // start from a blank state
-        FileUtil.fullyDelete(baseDir);
-      } catch (Exception ignored) {
-      }
-    }
-    baseDir.mkdirs();
-    if (!baseDir.isDirectory()) {
-      throw new RuntimeException("Value of root directory property" 
-                                 + "test.dfs.data for dfs test is not a directory: "
-                                 + baseDirSpecified);
-    }
-  }
-
-  /**
-   * Pseudo Distributed FS Test.
-   * Test DFS by running all the necessary daemons in one process.
-   *
-   * @throws Exception
-   */
-  public void testFsPseudoDistributed() throws Exception {
-    // test on a small cluster with 3 data nodes
-    testFsPseudoDistributed(3);
-  }
-  
-  private void testFsPseudoDistributed(int datanodeNum) throws Exception {
-    try {
-      prepareTempFileSpace();
-
-      configureDFS();
-      startDFS(datanodeNum);
-
-      if (logfh == null)
-        try {
-          logfh = new BufferedReader(new FileReader(logFile));
-        } catch (FileNotFoundException e) {
-          // TODO Auto-generated catch block
-          throw new AssertionFailedError("Log file does not exist: "+logFile);
-        }
-    
-      // create a directory
-      try {
-        assertTrue(dfsClient.mkdirs("/data"));
-        assertMkdirs("/data", false);
-      } catch (IOException ioe) {
-      	ioe.printStackTrace();
-      }
-       
-      try {
-        assertTrue(dfsClient.mkdirs("data"));
-        assertMkdirs("data", true);
-      } catch (IOException ioe) {
-       	ioe.printStackTrace();
-      }
-      
-      //
-      // create a file with 1 data block
-      try {
-        createFile("/data/xx", 1);
-        assertCreate("/data/xx", 1, false);
-      } catch(IOException ioe) {
-    	assertCreate("/data/xx", 1, true);
-      }
-    
-      // create a file with 2 data blocks
-      try {
-        createFile("/data/yy", BLOCK_SIZE+1);
-        assertCreate("/data/yy", BLOCK_SIZE+1, false);
-      } catch(IOException ioe) {
-    	assertCreate("/data/yy", BLOCK_SIZE+1, true);
-      }
-
-      // create an existing file
-      try {
-        createFile("/data/xx", 2);
-        assertCreate("/data/xx", 2, false);
-      } catch(IOException ioe) {
-      	assertCreate("/data/xx", 2, true);
-      }
-    
-      // delete the file
-      try {
-        dfsClient.delete("/data/yy", true);
-        assertDelete("/data/yy", false);
-      } catch(IOException ioe) {
-        ioe.printStackTrace();
-      }
-
-    
-      // rename the file
-      try {
-        dfsClient.rename("/data/xx", "/data/yy");
-        assertRename("/data/xx", "/data/yy", false);
-      } catch(IOException ioe) {
-      	ioe.printStackTrace();
-      }
-
-      try {
-        dfsClient.delete("/data/xx", true);
-        assertDelete("/data/xx", true);
-      } catch(IOException ioe) {
-    	ioe.printStackTrace();
-      }
-      
-      try {
-        dfsClient.rename("/data/xx", "/data/yy");    
-        assertRename("/data/xx", "/data/yy", true);
-      } catch(IOException ioe) {
-    	ioe.printStackTrace();
-      }
-        
-    } catch (AssertionFailedError afe) {
-      afe.printStackTrace();
-      throw afe;
-    } catch (Throwable t) {
-      msg("Unexpected exception_a: " + t);
-      t.printStackTrace();
-    } finally {
-      shutdownDFS();
-
-    }
-  }
-
-  private void createFile(String filename, long fileSize) throws IOException { 
-    //
-    //           write filesize of data to file
-    //
-    byte[] buffer = new byte[BUFFER_SIZE];
-    UTF8 testFileName = new UTF8(filename); // hardcode filename
-    OutputStream nos;
-    nos = dfsClient.create(testFileName.toString(), false);
-    try {
-      for (long nBytesWritten = 0L;
-           nBytesWritten < fileSize;
-           nBytesWritten += buffer.length) {
-        if ((nBytesWritten + buffer.length) > fileSize) {
-          int pb = (int) (fileSize - nBytesWritten);
-          byte[] bufferPartial = new byte[pb];
-          for(int i=0; i<pb; i++) {
-            bufferPartial[i]='a';
-          }
-          nos.write(bufferPartial);
-        } else {
-          for(int i=0; i<buffer.length;i++) {
-            buffer[i]='a';
-          }
-          nos.write(buffer);
-        }
-      }
-    } finally {
-      nos.flush();
-      nos.close();
-    }
-  }
-
-  private void assertMkdirs(String fileName, boolean failed) {
-    assertHasLogged("NameNode.mkdirs: " +fileName, DIR_LOG_HEADER_LEN+1);
-    assertHasLogged("NameSystem.mkdirs: "+fileName, DIR_LOG_HEADER_LEN);
-    if (failed)
-      assertHasLogged("FSDirectory.mkdirs: "
-                      +"failed to create directory "+fileName, DIR_LOG_HEADER_LEN);
-    else
-      assertHasLogged("FSDirectory.mkdirs: created directory "+fileName, DIR_LOG_HEADER_LEN);
-  }
-  
-  private void assertCreate(String fileName, int filesize, boolean failed) {
-    assertHasLogged("NameNode.create: file "+fileName, DIR_LOG_HEADER_LEN+1);
-    assertHasLogged("NameSystem.startFile: file "+fileName, DIR_LOG_HEADER_LEN);
-    if (failed) {
-      assertHasLogged("NameSystem.startFile: "
-                      +"failed to create file " + fileName, DIR_LOG_HEADER_LEN);
-    } else {
-      assertHasLogged("NameSystem.allocateBlock: "+fileName, BLOCK_LOG_HEADER_LEN);
-      int blockNum = (filesize/BLOCK_SIZE*BLOCK_SIZE==filesize)?
-        filesize/BLOCK_SIZE : 1+filesize/BLOCK_SIZE;
-      for(int i=1; i<blockNum; i++) {
-        assertHasLogged("NameNode.addBlock: file "+fileName, BLOCK_LOG_HEADER_LEN+1);
-        assertHasLogged("NameSystem.getAdditionalBlock: file "+fileName, BLOCK_LOG_HEADER_LEN);
-        assertHasLogged("NameSystem.allocateBlock: "+fileName, BLOCK_LOG_HEADER_LEN);
-      }
-      assertHasLogged("NameNode.complete: "+fileName, DIR_LOG_HEADER_LEN+1);
-      assertHasLogged("NameSystem.completeFile: "+fileName, DIR_LOG_HEADER_LEN);
-      assertHasLogged("FSDirectory.addFile: "+fileName+" with "
-                      +blockNum+" blocks is added to the file system", DIR_LOG_HEADER_LEN);
-      assertHasLogged("NameSystem.completeFile: "+fileName
-                      +" is removed from pendingCreates", DIR_LOG_HEADER_LEN);
-    }
-  }
-  
-  private void assertDelete(String fileName, boolean failed) {
-    assertHasLogged("NameNode.delete: "+fileName, DIR_LOG_HEADER_LEN+1);
-    assertHasLogged("NameSystem.delete: "+fileName, DIR_LOG_HEADER_LEN);
-    assertHasLogged("FSDirectory.delete: "+fileName, DIR_LOG_HEADER_LEN);
-    if (failed)
-      assertHasLogged("FSDirectory.unprotectedDelete: "
-                      +"failed to remove "+fileName, DIR_LOG_HEADER_LEN);
-    else
-      assertHasLogged("FSDirectory.unprotectedDelete: "
-                      +fileName+" is removed", DIR_LOG_HEADER_LEN);
-  }
-  
-  private void assertRename(String src, String dst, boolean failed) {
-    assertHasLogged("NameNode.rename: "+src+" to "+dst, DIR_LOG_HEADER_LEN+1);
-    assertHasLogged("NameSystem.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN);
-    assertHasLogged("FSDirectory.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN);
-    if (failed)
-      assertHasLogged("FSDirectory.unprotectedRenameTo: "
-                      +"failed to rename "+src+" to "+dst, DIR_LOG_HEADER_LEN);
-    else
-      assertHasLogged("FSDirectory.unprotectedRenameTo: "
-                      +src+" is renamed to "+dst, DIR_LOG_HEADER_LEN);
-  }
-  
-  private void assertHasLogged(String target, int headerLen) {
-    String line;
-    boolean notFound = true;
-    try {
-      while(notFound && (line=logfh.readLine()) != null) {
-        if (line.length()>headerLen && line.startsWith(target, headerLen))
-          notFound = false;
-      }
-    } catch(java.io.IOException e) {
-      throw new AssertionFailedError("error reading the log file");
-    }
-    if (notFound) {
-      throw new AssertionFailedError(target+" not logged");
-    }
-  }
-
-  //
-  //     modify config for test
-  //
-  private void configureDFS() throws IOException {
-    // set given config param to override other config settings
-    conf.setInt("dfs.block.size", BLOCK_SIZE);
-    // verify that config changed
-    assertTrue(BLOCK_SIZE == conf.getInt("dfs.block.size", 2)); // 2 is an intentional obviously-wrong block size
-    // downsize for testing (just to save resources)
-    conf.setInt("dfs.namenode.handler.count", 3);
-    conf.setLong("dfs.blockreport.intervalMsec", 50*1000L);
-    conf.setLong("dfs.datanode.startupMsec", 15*1000L);
-    conf.setInt("dfs.replication", 2);
-    System.setProperty("hadoop.log.dir", baseDirSpecified+"/logs");
-    conf.setInt("hadoop.logfile.count", 1);
-    conf.setInt("hadoop.logfile.size", 1000000000);
-  }
-  
-  private void startDFS(int dataNodeNum) throws IOException {
-    //
-    //          start a NameNode
-    String nameNodeSocketAddr = "localhost:" + nameNodePort;
-    FileSystem.setDefaultUri(conf, "hdfs://"+nameNodeSocketAddr);
-    
-    String nameFSDir = baseDirSpecified + "/name";
-    conf.set("dfs.name.dir", nameFSDir);
-	
-    NameNode.format(conf);
-    
-    nameNodeDaemon = new NameNode(nameNodeSocketAddr, conf);
-
-    //
-    //        start DataNodes
-    //
-    for (int i = 0; i < dataNodeNum; i++) {
-      // uniquely config real fs path for data storage for this datanode
-      String dataDir[] = new String[1];
-      dataDir[0] = baseDirSpecified + "/datanode" + i;
-      conf.set("dfs.data.dir", dataDir[0]);
-      DataNode dn = DataNode.makeInstance(dataDir, conf);
-      if (dn != null) {
-        dataNodeDaemons.add(dn);
-        (new Thread(dn, "DataNode" + i + ": " + dataDir[0])).start();
-      }
-    }
-	         
-    assertTrue("incorrect datanodes for test to continue",
-               (dataNodeDaemons.size() == dataNodeNum));
-    //
-    //          wait for datanodes to report in
-    try {
-      awaitQuiescence();
-    } catch(InterruptedException e) {
-      e.printStackTrace();
-    }
-      
-    //  act as if namenode is a remote process
-    dfsClient = new DFSClient(new InetSocketAddress("localhost", nameNodePort), conf);
-  }
-
-  private void shutdownDFS() {
-    // shutdown client
-    if (dfsClient != null) {
-      try {
-        msg("close down subthreads of DFSClient");
-        dfsClient.close();
-      } catch (Exception ignored) { }
-      msg("finished close down of DFSClient");
-    }
-
-    //
-    // shut down datanode daemons (this takes advantage of being same-process)
-    msg("begin shutdown of all datanode daemons");
-
-    for (int i = 0; i < dataNodeDaemons.size(); i++) {
-      DataNode dataNode = dataNodeDaemons.get(i);
-      try {
-        dataNode.shutdown();
-      } catch (Exception e) {
-        msg("ignoring exception during (all) datanode shutdown, e=" + e);
-      }
-    }
-    msg("finished shutdown of all datanode daemons");
-      
-    // shutdown namenode
-    msg("begin shutdown of namenode daemon");
-    try {
-      nameNodeDaemon.stop();
-    } catch (Exception e) {
-      msg("ignoring namenode shutdown exception=" + e);
-    }
-    msg("finished shutdown of namenode daemon");
-  }
-  
-  /** Wait for the DFS datanodes to become quiescent.
-   * The initial implementation is to sleep for some fixed amount of time,
-   * but a better implementation would be to really detect when distributed
-   * operations are completed.
-   * @throws InterruptedException
-   */
-  private void awaitQuiescence() throws InterruptedException {
-    // ToDo: Need observer pattern, not static sleep
-    // Doug suggested that the block report interval could be made shorter
-    //   and then observing that would be a good way to know when an operation
-    //   was complete (quiescence detect).
-    sleepAtLeast(30000);
-  }
-
-  private void msg(String s) {
-    //System.out.println(s);
-    LOG.info(s);
-  }
-
-  public static void sleepAtLeast(int tmsec) {
-    long t0 = System.currentTimeMillis();
-    long t1 = t0;
-    long tslept = t1 - t0;
-    while (tmsec > tslept) {
-      try {
-        long tsleep = tmsec - tslept;
-        Thread.sleep(tsleep);
-        t1 = System.currentTimeMillis();
-      }  catch (InterruptedException ie) {
-        t1 = System.currentTimeMillis();
-      }
-      tslept = t1 - t0;
-    }
-  }
-
-  public static void main(String[] args) throws Exception {
-    String classname = ClusterTestDFSNamespaceLogging.class.getName();
-    String usage = "Usage: " + classname + " (no args)";
-    if (args.length != 0) {
-      System.err.println(usage);
-      System.exit(-1);
-    }
-    String[] testargs = {classname};
-    junit.textui.TestRunner.main(testargs);
-  }
-
-}
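ClusterTestDFSNamespaceLogging verified namespace mutations by scraping NameNode log lines (NameNode.mkdirs, NameSystem.startFile, and so on), which made the assertions depend on exact message prefixes and header lengths. A sketch, not part of this commit, of checking the same mkdirs/create/rename/delete mutations directly through the FileSystem API against a MiniDFSCluster rather than by parsing logs:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.MiniDFSCluster;

public class NamespaceMutationCheck {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
    try {
      FileSystem fs = cluster.getFileSystem();

      // each namespace mutation is asserted through its return value
      // and by re-reading the namespace, not by matching log text
      if (!fs.mkdirs(new Path("/data")))
        throw new AssertionError("mkdirs /data failed");
      fs.create(new Path("/data/xx")).close();
      if (!fs.rename(new Path("/data/xx"), new Path("/data/yy")))
        throw new AssertionError("rename /data/xx -> /data/yy failed");
      if (!fs.exists(new Path("/data/yy")))
        throw new AssertionError("rename target missing");
      if (!fs.delete(new Path("/data/yy"), true))
        throw new AssertionError("delete /data/yy failed");
      if (fs.exists(new Path("/data/yy")))
        throw new AssertionError("delete left the file behind");
    } finally {
      cluster.shutdown();
    }
  }
}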

+ 0 - 2
src/test/org/apache/hadoop/test/AllTestDriver.java

@@ -27,7 +27,6 @@ import org.apache.hadoop.mapred.TestMapRed;
 import org.apache.hadoop.mapred.TestSequenceFileInputFormat;
 import org.apache.hadoop.mapred.TestTextInputFormat;
 import org.apache.hadoop.hdfs.BenchmarkThroughput;
-import org.apache.hadoop.hdfs.ClusterTestDFS;
 import org.apache.hadoop.hdfs.NNBench;
 import org.apache.hadoop.fs.DistributedFSCheck;
 import org.apache.hadoop.fs.TestDFSIO;
@@ -55,7 +54,6 @@ public class AllTestDriver {
       pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
       pgd.addClass("mrbench", MRBench.class, "A map/reduce benchmark that can create many small jobs");
       pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
       pgd.addClass("nnbench", NNBench.class, "A benchmark that stresses the namenode.");
       pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
       pgd.addClass("mapredtest", TestMapRed.class, "A map/reduce test check.");
-      pgd.addClass("clustertestdfs", ClusterTestDFS.class, "A pseudo distributed test for DFS.");
       pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
       pgd.addClass("testfilesystem", TestFileSystem.class, "A test for FileSystem read/write.");
       pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
       pgd.addClass("testsequencefile", TestSequenceFile.class, "A test for flat files of binary key value pairs.");
       pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");
       pgd.addClass("testsetfile", TestSetFile.class, "A test for flat files of binary key/value pairs.");