浏览代码

HADOOP-96. Logging improvements. Contributed by Hairong Kuang.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@400112 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 19 年之前
父节点
当前提交
aa8f6fa9b3

+ 6 - 0
CHANGES.txt

@@ -188,6 +188,12 @@ Trunk (unreleased)
     status of reduce tasks or completed jobs.  Also fixes the progress
     status of reduce tasks or completed jobs.  Also fixes the progress
     meter so that failed tasks are subtracted. (omalley via cutting)
     meter so that failed tasks are subtracted. (omalley via cutting)
 
 
+49. HADOOP-96.  Logging improvements.  Log files are now separate from
+    standard output and standard error files.  Logs are now rolled.
+    Logging of all DFS state changes can be enabled, to facilitate
+    debugging.  (Hairong Kuang via cutting)
+
+
 Release 0.1.1 - 2006-04-08
 Release 0.1.1 - 2006-04-08
 
 
  1. Added CHANGES.txt, logging all significant changes to Hadoop.  (cutting)
  1. Added CHANGES.txt, logging all significant changes to Hadoop.  (cutting)

+ 3 - 1
bin/hadoop

@@ -51,7 +51,7 @@ shift
 
 
 # some directories
 # some directories
 THIS_DIR=`dirname "$THIS"`
 THIS_DIR=`dirname "$THIS"`
-HADOOP_HOME=`cd "$THIS_DIR/.." ; pwd`
+export HADOOP_HOME=`cd "$THIS_DIR/.." ; pwd`
 
 
 # Allow alternate conf dir location.
 # Allow alternate conf dir location.
 HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
 HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
@@ -143,6 +143,8 @@ fi
 # cygwin path translation
 # cygwin path translation
 if expr `uname` : 'CYGWIN*' > /dev/null; then
 if expr `uname` : 'CYGWIN*' > /dev/null; then
   CLASSPATH=`cygpath -p -w "$CLASSPATH"`
   CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+  HADOOP_HOME=`cygpath -p -w "$HADOOP_HOME"`
+  HADOOP_LOG_DIR=`cygpath -p -w "$HADOOP_LOG_DIR"`
 fi
 fi
 
 
 # run it
 # run it

+ 5 - 6
bin/hadoop-daemon.sh

@@ -38,7 +38,7 @@ while [ -h "$this" ]; do
 done
 done
 
 
 # the root of the Hadoop installation
 # the root of the Hadoop installation
-HADOOP_HOME=`dirname "$this"`/..
+export HADOOP_HOME=`dirname "$this"`/..
 
 
 # Allow alternate conf dir location.
 # Allow alternate conf dir location.
 HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
 HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
@@ -49,9 +49,9 @@ fi
 
 
 # get log directory
 # get log directory
 if [ "$HADOOP_LOG_DIR" = "" ]; then
 if [ "$HADOOP_LOG_DIR" = "" ]; then
-  HADOOP_LOG_DIR="$HADOOP_HOME/logs"
-  mkdir -p "$HADOOP_LOG_DIR"
+  export HADOOP_LOG_DIR="$HADOOP_HOME/logs"
 fi
 fi
+mkdir -p "$HADOOP_LOG_DIR"
 
 
 if [ "$HADOOP_PID_DIR" = "" ]; then
 if [ "$HADOOP_PID_DIR" = "" ]; then
   HADOOP_PID_DIR=/tmp
   HADOOP_PID_DIR=/tmp
@@ -62,7 +62,7 @@ if [ "$HADOOP_IDENT_STRING" = "" ]; then
 fi
 fi
 
 
 # some variables
 # some variables
-log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-`hostname`.log
+log=$HADOOP_LOG_DIR/hadoop-$HADOOP_IDENT_STRING-$command-`hostname`.out
 pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
 pid=$HADOOP_PID_DIR/hadoop-$HADOOP_IDENT_STRING-$command.pid
 
 
 case $startStop in
 case $startStop in
@@ -81,9 +81,8 @@ case $startStop in
       rsync -a -e ssh --delete --exclude=.svn $HADOOP_MASTER/ "$HADOOP_HOME"
       rsync -a -e ssh --delete --exclude=.svn $HADOOP_MASTER/ "$HADOOP_HOME"
     fi
     fi
 
 
-    cd "$HADOOP_HOME"
     echo starting $command, logging to $log
     echo starting $command, logging to $log
-    nohup bin/hadoop $command "$@" >& "$log" < /dev/null &
+    nohup $HADOOP_HOME/bin/hadoop $command "$@" >& "$log" < /dev/null &
     echo $! > $pid
     echo $! > $pid
     sleep 1; head "$log"
     sleep 1; head "$log"
     ;;
     ;;

+ 3 - 1
bin/hadoop-daemons.sh

@@ -13,4 +13,6 @@ fi
 bin=`dirname "$0"`
 bin=`dirname "$0"`
 bin=`cd "$bin"; pwd`
 bin=`cd "$bin"; pwd`
 
 
-exec "$bin/slaves.sh" "$bin/hadoop-daemon.sh" "$@"
+HADOOP_HOME="$bin/.."
+
+exec "$bin/slaves.sh" cd "$HADOOP_HOME" \; "$bin/hadoop-daemon.sh" "$@"

+ 22 - 0
conf/hadoop-default.xml

@@ -7,6 +7,28 @@
 
 
 <configuration>
 <configuration>
 
 
+<!--- logging properties -->
+
+<property>
+  <name>hadoop.logfile.size</name>
+  <value>10000000</value>
+  <description>The max size of each log file</description>
+</property>
+
+<property>
+  <name>hadoop.logfile.count</name>
+  <value>10</value>
+  <description>The max number of log files</description>
+</property>
+
+<property>
+  <name>dfs.namenode.logging.level</name>
+  <value>info</value>
+  <description>The logging level for dfs namenode. Other values are "dir"
+(trace namespace mutations), "block"(trace block under/over replications
+and block creations/deletions), or "all".</description>
+</property>
+
 <!-- i/o properties -->
 <!-- i/o properties -->
 
 
 <property>
 <property>

+ 3 - 1
src/java/org/apache/hadoop/dfs/DataNode.java

@@ -824,7 +824,9 @@ public class DataNode implements FSConstants, Runnable {
     /**
     /**
      */
      */
     public static void main(String args[]) throws IOException {
     public static void main(String args[]) throws IOException {
+        Configuration conf = new Configuration();
         LogFormatter.setShowThreadIDs(true);
         LogFormatter.setShowThreadIDs(true);
-        runAndWait(new Configuration());
+        LogFormatter.initFileHandler(conf, "datanode");
+        runAndWait(conf);
     }
     }
 }
 }

+ 29 - 2
src/java/org/apache/hadoop/dfs/FSDirectory.java

@@ -622,8 +622,12 @@ class FSDirectory implements FSConstants {
         String pathString = path.toString();
         String pathString = path.toString();
         mkdirs(new Path(pathString).getParent().toString());
         mkdirs(new Path(pathString).getParent().toString());
         INode newNode = new INode( new File(pathString).getName(), blocks, replication);
         INode newNode = new INode( new File(pathString).getName(), blocks, replication);
-        if( ! unprotectedAddFile(path, newNode) )
-          return false;
+        if( ! unprotectedAddFile(path, newNode) ) {
+           NameNode.stateChangeLog.info("DIR* FSDirectory.addFile: "
+                    +"failed to add "+path+" with "
+                    +blocks.length+" blocks to the file system" );
+           return false;
+        }
         // add create file record to log
         // add create file record to log
         UTF8 nameReplicationPair[] = new UTF8[] { 
         UTF8 nameReplicationPair[] = new UTF8[] { 
                               path, 
                               path, 
@@ -631,6 +635,8 @@ class FSDirectory implements FSConstants {
         logEdit(OP_ADD,
         logEdit(OP_ADD,
                 new ArrayWritable( UTF8.class, nameReplicationPair ), 
                 new ArrayWritable( UTF8.class, nameReplicationPair ), 
                 new ArrayWritable( Block.class, newNode.blocks ));
                 new ArrayWritable( Block.class, newNode.blocks ));
+        NameNode.stateChangeLog.fine("DIR* FSDirectory.addFile: "
+                +path+" with "+blocks.length+" blocks is added to the file system" );
         return true;
         return true;
     }
     }
     
     
@@ -658,6 +664,8 @@ class FSDirectory implements FSConstants {
      * Change the filename
      * Change the filename
      */
      */
     public boolean renameTo(UTF8 src, UTF8 dst) {
     public boolean renameTo(UTF8 src, UTF8 dst) {
+        NameNode.stateChangeLog.fine("DIR* FSDirectory.renameTo: "
+                +src+" to "+dst );
         waitForReady();
         waitForReady();
         if (unprotectedRenameTo(src, dst)) {
         if (unprotectedRenameTo(src, dst)) {
             logEdit(OP_RENAME, src, dst);
             logEdit(OP_RENAME, src, dst);
@@ -675,6 +683,8 @@ class FSDirectory implements FSConstants {
           String dstStr = dst.toString();
           String dstStr = dst.toString();
             INode renamedNode = rootDir.getNode(srcStr);
             INode renamedNode = rootDir.getNode(srcStr);
             if (renamedNode == null) {
             if (renamedNode == null) {
+                NameNode.stateChangeLog.warning("DIR* FSDirectory.unprotectedRenameTo: "
+                        +"failed to rename "+src+" to "+dst+ " because "+ src+" does not exist" );
                 return false;
                 return false;
             }
             }
             renamedNode.removeNode();
             renamedNode.removeNode();
@@ -683,9 +693,13 @@ class FSDirectory implements FSConstants {
             }
             }
             // the renamed node can be reused now
             // the renamed node can be reused now
             if( rootDir.addNode(dstStr, renamedNode ) == null ) {
             if( rootDir.addNode(dstStr, renamedNode ) == null ) {
+                NameNode.stateChangeLog.warning("DIR* FSDirectory.unprotectedRenameTo: "
+                        +"failed to rename "+src+" to "+dst );
               rootDir.addNode(srcStr, renamedNode); // put it back
               rootDir.addNode(srcStr, renamedNode); // put it back
               return false;
               return false;
             }
             }
+            NameNode.stateChangeLog.fine("DIR* FSDirectory.unprotectedRenameTo: "
+                     +src+" is renamed to "+dst );
             return true;
             return true;
         }
         }
     }
     }
@@ -738,6 +752,8 @@ class FSDirectory implements FSConstants {
      * Remove the file from management, return blocks
      * Remove the file from management, return blocks
      */
      */
     public Block[] delete(UTF8 src) {
     public Block[] delete(UTF8 src) {
+        NameNode.stateChangeLog.fine("DIR* FSDirectory.delete: "
+                +src );
         waitForReady();
         waitForReady();
         Block[] blocks = unprotectedDelete(src); 
         Block[] blocks = unprotectedDelete(src); 
         if( blocks != null )
         if( blocks != null )
@@ -751,6 +767,8 @@ class FSDirectory implements FSConstants {
         synchronized (rootDir) {
         synchronized (rootDir) {
             INode targetNode = rootDir.getNode(src.toString());
             INode targetNode = rootDir.getNode(src.toString());
             if (targetNode == null) {
             if (targetNode == null) {
+                NameNode.stateChangeLog.warning("DIR* FSDirectory.unprotectedDelete: "
+                        +"failed to remove "+src+" because it does not exist" );
                 return null;
                 return null;
             } else {
             } else {
                 //
                 //
@@ -758,8 +776,12 @@ class FSDirectory implements FSConstants {
                 // the blocks underneath the node.
                 // the blocks underneath the node.
                 //
                 //
                 if (! targetNode.removeNode()) {
                 if (! targetNode.removeNode()) {
+                    NameNode.stateChangeLog.warning("DIR* FSDirectory.unprotectedDelete: "
+                            +"failed to remove "+src+" because it does not have a parent" );
                     return null;
                     return null;
                 } else {
                 } else {
+                    NameNode.stateChangeLog.fine("DIR* FSDirectory.unprotectedDelete: "
+                            +src+" is removed" );
                     Vector v = new Vector();
                     Vector v = new Vector();
                     targetNode.collectSubtreeBlocks(v);
                     targetNode.collectSubtreeBlocks(v);
                     for (Iterator it = v.iterator(); it.hasNext(); ) {
                     for (Iterator it = v.iterator(); it.hasNext(); ) {
@@ -905,12 +927,17 @@ class FSDirectory implements FSConstants {
             String cur = (String) v.elementAt(i);
             String cur = (String) v.elementAt(i);
             INode inserted = unprotectedMkdir(cur);
             INode inserted = unprotectedMkdir(cur);
             if (inserted != null) {
             if (inserted != null) {
+                NameNode.stateChangeLog.fine("DIR* FSDirectory.mkdirs: "
+                        +"created directory "+cur );
                 logEdit(OP_MKDIR, new UTF8(inserted.computeName()), null);
                 logEdit(OP_MKDIR, new UTF8(inserted.computeName()), null);
                 lastSuccess = true;
                 lastSuccess = true;
             } else {
             } else {
                 lastSuccess = false;
                 lastSuccess = false;
             }
             }
         }
         }
+        if( !lastSuccess )
+            NameNode.stateChangeLog.warning("DIR* FSDirectory.mkdirs: "
+                    +"failed to create directory "+src );
         return lastSuccess;
         return lastSuccess;
     }
     }
 
 

+ 135 - 50
src/java/org/apache/hadoop/dfs/FSNamesystem.java

@@ -284,7 +284,7 @@ class FSNamesystem implements FSConstants {
                                     short replication, 
                                     short replication, 
                                     UTF8 clientName 
                                     UTF8 clientName 
                                   ) throws IOException {
                                   ) throws IOException {
-      String text = "File " + src 
+      String text = "file " + src 
               + ((clientName != null) ? " on client " + clientName : "")
               + ((clientName != null) ? " on client " + clientName : "")
               + ".\n"
               + ".\n"
               + "Requested replication " + replication;
               + "Requested replication " + replication;
@@ -314,31 +314,38 @@ class FSNamesystem implements FSConstants {
                                             boolean overwrite,
                                             boolean overwrite,
                                             short replication 
                                             short replication 
                                           ) throws IOException {
                                           ) throws IOException {
+      NameNode.stateChangeLog.fine("DIR* NameSystem.startFile: file "
+            +src+" for "+holder+" at "+clientMachine);
       try {
       try {
         if (pendingCreates.get(src) != null) {
         if (pendingCreates.get(src) != null) {
-          String msg = "Cannot create file " + src + " for " + holder +
-                       " on " + clientMachine + 
-                       " because pendingCreates is non-null.";
-          throw new NameNode.AlreadyBeingCreatedException(msg);
+           throw new NameNode.AlreadyBeingCreatedException(
+                   "failed to create file " + src + " for " + holder +
+                   " on client " + clientMachine + 
+                   " because pendingCreates is non-null.");
         }
         }
 
 
-        verifyReplication(src.toString(), replication, clientMachine );
-        
+        try {
+           verifyReplication(src.toString(), replication, clientMachine );
+        } catch( IOException e) {
+            throw new IOException( "failed to create "+e.getMessage());
+        }
         if (!dir.isValidToCreate(src)) {
         if (!dir.isValidToCreate(src)) {
           if (overwrite) {
           if (overwrite) {
             delete(src);
             delete(src);
           } else {
           } else {
-            throw new IOException("Can't create file " + src + 
-                                  ", because the filename is invalid.");
+            throw new IOException("failed to create file " + src 
+                    +" on client " + clientMachine
+                    +" either because the filename is invalid or the file exists");
           }
           }
         }
         }
 
 
         // Get the array of replication targets 
         // Get the array of replication targets 
         DatanodeInfo targets[] = chooseTargets(replication, null, clientMachine);
         DatanodeInfo targets[] = chooseTargets(replication, null, clientMachine);
         if (targets.length < this.minReplication) {
         if (targets.length < this.minReplication) {
-            throw new IOException("Target-length is " + targets.length +
-                                  ", below MIN_REPLICATION (" + 
-                                  minReplication+ ")");
+            throw new IOException("failed to create file "+src
+                    +" on client " + clientMachine
+                    +" because target-length is " + targets.length 
+                    +", below MIN_REPLICATION (" + minReplication+ ")");
        }
        }
 
 
         // Reserve space for this pending file
         // Reserve space for this pending file
@@ -346,7 +353,8 @@ class FSNamesystem implements FSConstants {
                            new FileUnderConstruction(replication, 
                            new FileUnderConstruction(replication, 
                                                      holder,
                                                      holder,
                                                      clientMachine));
                                                      clientMachine));
-        LOG.fine("Adding " + src + " to pendingCreates for " + holder);
+        NameNode.stateChangeLog.finer( "DIR* NameSystem.startFile: "
+                   +"add "+src+" to pendingCreates for "+holder );
         synchronized (leases) {
         synchronized (leases) {
             Lease lease = (Lease) leases.get(holder);
             Lease lease = (Lease) leases.get(holder);
             if (lease == null) {
             if (lease == null) {
@@ -367,7 +375,8 @@ class FSNamesystem implements FSConstants {
         results[1] = targets;
         results[1] = targets;
         return results;
         return results;
       } catch (IOException ie) {
       } catch (IOException ie) {
-        LOG.warning(ie.getMessage());
+          NameNode.stateChangeLog.warning("DIR* NameSystem.startFile: "
+                  +ie.getMessage());
         throw ie;
         throw ie;
       }
       }
     }
     }
@@ -386,6 +395,8 @@ class FSNamesystem implements FSConstants {
     public synchronized Object[] getAdditionalBlock(UTF8 src, 
     public synchronized Object[] getAdditionalBlock(UTF8 src, 
                                                     UTF8 clientName
                                                     UTF8 clientName
                                                     ) throws IOException {
                                                     ) throws IOException {
+        NameNode.stateChangeLog.fine("BLOCK* NameSystem.getAdditionalBlock: file "
+            +src+" for "+clientName);
         FileUnderConstruction pendingFile = 
         FileUnderConstruction pendingFile = 
           (FileUnderConstruction) pendingCreates.get(src);
           (FileUnderConstruction) pendingCreates.get(src);
         // make sure that we still have the lease on this file
         // make sure that we still have the lease on this file
@@ -428,6 +439,8 @@ class FSNamesystem implements FSConstants {
         //
         //
         // Remove the block from the pending creates list
         // Remove the block from the pending creates list
         //
         //
+        NameNode.stateChangeLog.fine("BLOCK* NameSystem.abandonBlock: "
+                +b.getBlockName()+"of file "+src );
         FileUnderConstruction pendingFile = 
         FileUnderConstruction pendingFile = 
           (FileUnderConstruction) pendingCreates.get(src);
           (FileUnderConstruction) pendingCreates.get(src);
         if (pendingFile != null) {
         if (pendingFile != null) {
@@ -437,6 +450,10 @@ class FSNamesystem implements FSConstants {
                 if (cur.compareTo(b) == 0) {
                 if (cur.compareTo(b) == 0) {
                     pendingCreateBlocks.remove(cur);
                     pendingCreateBlocks.remove(cur);
                     it.remove();
                     it.remove();
+                    NameNode.stateChangeLog.finer(
+                             "BLOCK* NameSystem.abandonBlock: "
+                            +b.getBlockName()
+                            +" is removed from pendingCreateBlock and pendingCreates");
                     return true;
                     return true;
                 }
                 }
             }
             }
@@ -450,7 +467,7 @@ class FSNamesystem implements FSConstants {
     public synchronized void abandonFileInProgress(UTF8 src, 
     public synchronized void abandonFileInProgress(UTF8 src, 
                                                    UTF8 holder
                                                    UTF8 holder
                                                    ) throws IOException {
                                                    ) throws IOException {
-      LOG.info("abandoning file in progress on " + src.toString());
+      NameNode.stateChangeLog.fine("DIR* NameSystem.abandonFileInProgress:" + src );
       synchronized (leases) {
       synchronized (leases) {
         // find the lease
         // find the lease
         Lease lease = (Lease) leases.get(holder);
         Lease lease = (Lease) leases.get(holder);
@@ -478,10 +495,12 @@ class FSNamesystem implements FSConstants {
      * been reported by datanodes and are replicated correctly.
      * been reported by datanodes and are replicated correctly.
      */
      */
     public synchronized int completeFile(UTF8 src, UTF8 holder) {
     public synchronized int completeFile(UTF8 src, UTF8 holder) {
+        NameNode.stateChangeLog.fine("DIR* NameSystem.completeFile: " + src + " for " + holder );
         if (dir.getFile(src) != null || pendingCreates.get(src) == null) {
         if (dir.getFile(src) != null || pendingCreates.get(src) == null) {
-            LOG.info( "Failed to complete " + src + 
-                      "  because dir.getFile()==" + dir.getFile(src) + 
-                      " and " + pendingCreates.get(src));
+            NameNode.stateChangeLog.warning( "DIR* NameSystem.completeFile: "
+                    + "failed to complete " + src
+                    + " because dir.getFile()==" + dir.getFile(src) 
+                    + " and " + pendingCreates.get(src));
             return OPERATION_FAILED;
             return OPERATION_FAILED;
         } else if (! checkFileProgress(src)) {
         } else if (! checkFileProgress(src)) {
             return STILL_WAITING;
             return STILL_WAITING;
@@ -519,14 +538,14 @@ class FSNamesystem implements FSConstants {
         // Now we can add the (name,blocks) tuple to the filesystem
         // Now we can add the (name,blocks) tuple to the filesystem
         //
         //
         if ( ! dir.addFile(src, pendingBlocks, pendingFile.getReplication())) {
         if ( ! dir.addFile(src, pendingBlocks, pendingFile.getReplication())) {
-          System.out.println("AddFile() for " + src + " failed");
           return OPERATION_FAILED;
           return OPERATION_FAILED;
         }
         }
 
 
         // The file is no longer pending
         // The file is no longer pending
         pendingCreates.remove(src);
         pendingCreates.remove(src);
-        LOG.fine("Removing " + src + " from pendingCreates for " + holder +
-                 ". (complete)");
+        NameNode.stateChangeLog.finer(
+             "DIR* NameSystem.completeFile: " + src
+           + " is removed from pendingCreates");
         for (int i = 0; i < nrBlocks; i++) {
         for (int i = 0; i < nrBlocks; i++) {
             pendingCreateBlocks.remove(pendingBlocks[i]);
             pendingCreateBlocks.remove(pendingBlocks[i]);
         }
         }
@@ -554,13 +573,11 @@ class FSNamesystem implements FSConstants {
         for (int i = 0; i < nrBlocks; i++) {
         for (int i = 0; i < nrBlocks; i++) {
             TreeSet containingNodes = (TreeSet) blocksMap.get(pendingBlocks[i]);
             TreeSet containingNodes = (TreeSet) blocksMap.get(pendingBlocks[i]);
             if (containingNodes.size() < pendingFile.getReplication()) {
             if (containingNodes.size() < pendingFile.getReplication()) {
+                   NameNode.stateChangeLog.finer(
+                          "DIR* NameSystem.completeFile:"
+                        + pendingBlocks[i].getBlockName()+" has only "+containingNodes.size()
+                        +" replicas so is added to neededReplications");           
                 synchronized (neededReplications) {
                 synchronized (neededReplications) {
-                    LOG.info("Completed file " + src 
-                              + ", at holder " + holder 
-                              + ".  There is/are only " + containingNodes.size() 
-                              + " copies of block " + pendingBlocks[i] 
-                              + ", so replicating up to " 
-                              + pendingFile.getReplication());
                     neededReplications.add(pendingBlocks[i]);
                     neededReplications.add(pendingBlocks[i]);
                 }
                 }
             }
             }
@@ -577,6 +594,9 @@ class FSNamesystem implements FSConstants {
           (FileUnderConstruction) pendingCreates.get(src);
           (FileUnderConstruction) pendingCreates.get(src);
         v.getBlocks().add(b);
         v.getBlocks().add(b);
         pendingCreateBlocks.add(b);
         pendingCreateBlocks.add(b);
+        NameNode.stateChangeLog.finer("BLOCK* NameSystem.allocateBlock: "
+            +src+ ". "+b.getBlockName()+
+            " is created and added to pendingCreates and pendingCreateBlocks" );      
         return b;
         return b;
     }
     }
 
 
@@ -613,6 +633,7 @@ class FSNamesystem implements FSConstants {
      * Change the indicated filename.
      * Change the indicated filename.
      */
      */
     public boolean renameTo(UTF8 src, UTF8 dst) {
     public boolean renameTo(UTF8 src, UTF8 dst) {
+        NameNode.stateChangeLog.fine("DIR* NameSystem.renameTo: " + src + " to " + dst );
         return dir.renameTo(src, dst);
         return dir.renameTo(src, dst);
     }
     }
 
 
@@ -621,6 +642,7 @@ class FSNamesystem implements FSConstants {
      * invalidate some blocks that make up the file.
      * invalidate some blocks that make up the file.
      */
      */
     public synchronized boolean delete(UTF8 src) {
     public synchronized boolean delete(UTF8 src) {
+        NameNode.stateChangeLog.fine("DIR* NameSystem.delete: " + src );
         Block deletedBlocks[] = (Block[]) dir.delete(src);
         Block deletedBlocks[] = (Block[]) dir.delete(src);
         if (deletedBlocks != null) {
         if (deletedBlocks != null) {
             for (int i = 0; i < deletedBlocks.length; i++) {
             for (int i = 0; i < deletedBlocks.length; i++) {
@@ -636,6 +658,8 @@ class FSNamesystem implements FSConstants {
                             recentInvalidateSets.put(node.getName(), invalidateSet);
                             recentInvalidateSets.put(node.getName(), invalidateSet);
                         }
                         }
                         invalidateSet.add(b);
                         invalidateSet.add(b);
+                        NameNode.stateChangeLog.finer("BLOCK* NameSystem.delete: "
+                            + b.getBlockName() + " is added to invalidSet of " + node.getName() );
                     }
                     }
                 }
                 }
             }
             }
@@ -666,6 +690,7 @@ class FSNamesystem implements FSConstants {
      * Create all the necessary directories
      * Create all the necessary directories
      */
      */
     public boolean mkdirs(UTF8 src) {
     public boolean mkdirs(UTF8 src) {
+        NameNode.stateChangeLog.fine("DIR* NameSystem.mkdirs: " + src );
         return dir.mkdirs(src);
         return dir.mkdirs(src);
     }
     }
 
 
@@ -897,15 +922,18 @@ class FSNamesystem implements FSConstants {
       FileUnderConstruction v = 
       FileUnderConstruction v = 
         (FileUnderConstruction) pendingCreates.remove(src);
         (FileUnderConstruction) pendingCreates.remove(src);
       if (v != null) {
       if (v != null) {
-        LOG.info("Removing " + src + " from pendingCreates for " + 
-            holder + " (failure)");
+         NameNode.stateChangeLog.finer(
+                      "DIR* NameSystem.internalReleaseCreate: " + src
+                    + " is removed from pendingCreates for "
+                    + holder + " (failure)");
         for (Iterator it2 = v.getBlocks().iterator(); it2.hasNext(); ) {
         for (Iterator it2 = v.getBlocks().iterator(); it2.hasNext(); ) {
           Block b = (Block) it2.next();
           Block b = (Block) it2.next();
           pendingCreateBlocks.remove(b);
           pendingCreateBlocks.remove(b);
         }
         }
       } else {
       } else {
-        LOG.info("Attempt to release a create lock on " + src.toString()
-                 + " that was not in pendingCreates");
+          NameNode.stateChangeLog.warning("DIR* NameSystem.internalReleaseCreate: "
+                 + "attempt to release a create lock on "+ src.toString()
+                 + " that was not in pedingCreates");
       }
       }
     }
     }
 
 
@@ -949,8 +977,9 @@ class FSNamesystem implements FSConstants {
                 DatanodeInfo nodeinfo = (DatanodeInfo) datanodeMap.get(name);
                 DatanodeInfo nodeinfo = (DatanodeInfo) datanodeMap.get(name);
 
 
                 if (nodeinfo == null) {
                 if (nodeinfo == null) {
-                    LOG.info("Got brand-new heartbeat from " + name);
-                    nodeinfo = new DatanodeInfo(name, capacity, remaining);
+                    NameNode.stateChangeLog.fine("BLOCK* NameSystem.gotHeartbeat: "
+                            +"brand-new heartbeat from "+name );
+                     nodeinfo = new DatanodeInfo(name, capacity, remaining);
                     datanodeMap.put(name, nodeinfo);
                     datanodeMap.put(name, nodeinfo);
                     capacityDiff = capacity;
                     capacityDiff = capacity;
                     remainingDiff = remaining;
                     remainingDiff = remaining;
@@ -995,11 +1024,13 @@ class FSNamesystem implements FSConstants {
             while ((heartbeats.size() > 0) &&
             while ((heartbeats.size() > 0) &&
                    ((nodeInfo = (DatanodeInfo) heartbeats.first()) != null) &&
                    ((nodeInfo = (DatanodeInfo) heartbeats.first()) != null) &&
                    (nodeInfo.lastUpdate() < System.currentTimeMillis() - EXPIRE_INTERVAL)) {
                    (nodeInfo.lastUpdate() < System.currentTimeMillis() - EXPIRE_INTERVAL)) {
-                LOG.info("Lost heartbeat for " + nodeInfo.getName());
-
                 heartbeats.remove(nodeInfo);
                 heartbeats.remove(nodeInfo);
+                NameNode.stateChangeLog.info("BLOCK* NameSystem.heartbeatCheck: "
+                           + "lost heartbeat from " + nodeInfo.getName());
                 synchronized (datanodeMap) {
                 synchronized (datanodeMap) {
                     datanodeMap.remove(nodeInfo.getName());
                     datanodeMap.remove(nodeInfo.getName());
+                    NameNode.stateChangeLog.finer("BLOCK* NameSystem.heartbeatCheck: "
+                            + nodeInfo.getName() + " is removed from datanodeMap");
                 }
                 }
                 totalCapacity -= nodeInfo.getCapacity();
                 totalCapacity -= nodeInfo.getCapacity();
                 totalRemaining -= nodeInfo.getRemaining();
                 totalRemaining -= nodeInfo.getRemaining();
@@ -1023,8 +1054,12 @@ class FSNamesystem implements FSConstants {
      * update the (machine-->blocklist) and (block-->machinelist) tables.
      * update the (machine-->blocklist) and (block-->machinelist) tables.
      */
      */
     public synchronized Block[] processReport(Block newReport[], UTF8 name) {
     public synchronized Block[] processReport(Block newReport[], UTF8 name) {
+        NameNode.stateChangeLog.fine("BLOCK* NameSystem.processReport: "
+                +"from "+name+" "+newReport.length+" blocks" );
         DatanodeInfo node = (DatanodeInfo) datanodeMap.get(name);
         DatanodeInfo node = (DatanodeInfo) datanodeMap.get(name);
         if (node == null) {
         if (node == null) {
+            NameNode.stateChangeLog.severe("BLOCK* NameSystem.processReport: "
+                    +"from "+name+" but can not find its info" );
             throw new IllegalArgumentException("Unexpected exception.  Received block report from node " + name + ", but there is no info for " + name);
             throw new IllegalArgumentException("Unexpected exception.  Received block report from node " + name + ", but there is no info for " + name);
         }
         }
 
 
@@ -1084,8 +1119,9 @@ class FSNamesystem implements FSConstants {
             Block b = (Block) it.next();
             Block b = (Block) it.next();
 
 
             if (! dir.isValidBlock(b) && ! pendingCreateBlocks.contains(b)) {
             if (! dir.isValidBlock(b) && ! pendingCreateBlocks.contains(b)) {
-                LOG.info("Obsoleting block " + b);
                 obsolete.add(b);
                 obsolete.add(b);
+                NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: "
+                        +"ask "+name+" to delete "+b.getBlockName() );
             }
             }
         }
         }
         return (Block[]) obsolete.toArray(new Block[obsolete.size()]);
         return (Block[]) obsolete.toArray(new Block[obsolete.size()]);
@@ -1103,8 +1139,19 @@ class FSNamesystem implements FSConstants {
         }
         }
         if (! containingNodes.contains(node)) {
         if (! containingNodes.contains(node)) {
             containingNodes.add(node);
             containingNodes.add(node);
+            // 
+            // NOTE: this log record would ideally be at the FINER level.
+            // However, at startup time so many new blocks arrive that
+            // FINER-level records would fill the entire log file,
+            // so the level is set to FINEST instead.
+            //
+            NameNode.stateChangeLog.finest("BLOCK* NameSystem.addStoredBlock: "
+                    +"blockMap updated: "+node.getName()+" is added to "+block.getBlockName() );
         } else {
         } else {
-            LOG.info("Redundant addStoredBlock request received for block " + block + " on node " + node);
+            NameNode.stateChangeLog.warning("BLOCK* NameSystem.addStoredBlock: "
+                    + "Redundant addStoredBlock request received for " 
+                    + block.getBlockName() + " on " + node.getName());
         }
         }
 
 
         synchronized (neededReplications) {
         synchronized (neededReplications) {
@@ -1115,8 +1162,15 @@ class FSNamesystem implements FSConstants {
             if (containingNodes.size() >= fileReplication ) {
             if (containingNodes.size() >= fileReplication ) {
                 neededReplications.remove(block);
                 neededReplications.remove(block);
                 pendingReplications.remove(block);
                 pendingReplications.remove(block);
-            } else // containingNodes.size() < fileReplication
+                NameNode.stateChangeLog.finest("BLOCK* NameSystem.addStoredBlock: "
+                        +block.getBlockName()+" has "+containingNodes.size()
+                        +" replicas so is removed from neededReplications and pendingReplications" );
+            } else {// containingNodes.size() < fileReplication
                 neededReplications.add(block);
                 neededReplications.add(block);
+                NameNode.stateChangeLog.finer("BLOCK* NameSystem.addStoredBlock: "
+                    +block.getBlockName()+" has only "+containingNodes.size()
+                    +" replicas so is added to neededReplications" );
+            }
 
 
             proccessOverReplicatedBlock( block, fileReplication );
             proccessOverReplicatedBlock( block, fileReplication );
         }
         }
@@ -1161,6 +1215,8 @@ class FSNamesystem implements FSConstants {
                 excessReplicateMap.put(cur.getName(), excessBlocks);
                 excessReplicateMap.put(cur.getName(), excessBlocks);
             }
             }
             excessBlocks.add(b);
             excessBlocks.add(b);
+            NameNode.stateChangeLog.finer("BLOCK* NameSystem.chooseExcessReplicates: "
+                    +"("+cur.getName()+", "+b.getBlockName()+") is added to excessReplicateMap" );
 
 
             //
             //
             // The 'excessblocks' tracks blocks until we get confirmation
             // The 'excessblocks' tracks blocks until we get confirmation
@@ -1177,6 +1233,8 @@ class FSNamesystem implements FSConstants {
                 recentInvalidateSets.put(cur.getName(), invalidateSet);
                 recentInvalidateSets.put(cur.getName(), invalidateSet);
             }
             }
             invalidateSet.add(b);
             invalidateSet.add(b);
+            NameNode.stateChangeLog.finer("BLOCK* NameSystem.chooseExcessReplicates: "
+                    +"("+cur.getName()+", "+b.getBlockName()+") is added to recentInvalidateSets" );
         }
         }
     }
     }
 
 
@@ -1185,12 +1243,13 @@ class FSNamesystem implements FSConstants {
      * replication tasks, if the removed block is still valid.
      * replication tasks, if the removed block is still valid.
      */
      */
     synchronized void removeStoredBlock(Block block, DatanodeInfo node) {
     synchronized void removeStoredBlock(Block block, DatanodeInfo node) {
+        NameNode.stateChangeLog.fine("BLOCK* NameSystem.removeStoredBlock: "
+                +block.getBlockName() + " from "+node.getName() );
         TreeSet containingNodes = (TreeSet) blocksMap.get(block);
         TreeSet containingNodes = (TreeSet) blocksMap.get(block);
         if (containingNodes == null || ! containingNodes.contains(node)) {
         if (containingNodes == null || ! containingNodes.contains(node)) {
             throw new IllegalArgumentException("No machine mapping found for block " + block + ", which should be at node " + node);
             throw new IllegalArgumentException("No machine mapping found for block " + block + ", which should be at node " + node);
         }
         }
         containingNodes.remove(node);
         containingNodes.remove(node);
-
         //
         //
         // It's possible that the block was removed because of a datanode
         // It's possible that the block was removed because of a datanode
         // failure.  If the block is still valid, check if replication is
         // failure.  If the block is still valid, check if replication is
@@ -1202,6 +1261,9 @@ class FSNamesystem implements FSConstants {
             synchronized (neededReplications) {
             synchronized (neededReplications) {
                 neededReplications.add(block);
                 neededReplications.add(block);
             }
             }
+            NameNode.stateChangeLog.finer("BLOCK* NameSystem.removeStoredBlock: "
+                    +block.getBlockName()+" has only "+containingNodes.size()
+                    +" replicas so is added to neededReplications" );
         }
         }
 
 
         //
         //
@@ -1211,6 +1273,8 @@ class FSNamesystem implements FSConstants {
         TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(node.getName());
         TreeSet excessBlocks = (TreeSet) excessReplicateMap.get(node.getName());
         if (excessBlocks != null) {
         if (excessBlocks != null) {
             excessBlocks.remove(block);
             excessBlocks.remove(block);
+            NameNode.stateChangeLog.finer("BLOCK* NameSystem.removeStoredBlock: "
+                    +block.getBlockName()+" is removed from excessBlocks" );
             if (excessBlocks.size() == 0) {
             if (excessBlocks.size() == 0) {
                 excessReplicateMap.remove(node.getName());
                 excessReplicateMap.remove(node.getName());
             }
             }
@@ -1223,8 +1287,12 @@ class FSNamesystem implements FSConstants {
     public synchronized void blockReceived(Block block, UTF8 name) {
     public synchronized void blockReceived(Block block, UTF8 name) {
         DatanodeInfo node = (DatanodeInfo) datanodeMap.get(name);
         DatanodeInfo node = (DatanodeInfo) datanodeMap.get(name);
         if (node == null) {
         if (node == null) {
+            NameNode.stateChangeLog.warning("BLOCK* NameSystem.blockReceived: "
+                    +block.getBlockName()+" is received from an unrecorded node " + name );
             throw new IllegalArgumentException("Unexpected exception.  Got blockReceived message from node " + name + ", but there is no info for " + name);
             throw new IllegalArgumentException("Unexpected exception.  Got blockReceived message from node " + name + ", but there is no info for " + name);
         }
         }
+        NameNode.stateChangeLog.fine("BLOCK* NameSystem.blockReceived: "
+                +block.getBlockName()+" is received from " + name );
         //
         //
         // Modify the blocks->datanode map
         // Modify the blocks->datanode map
         // 
         // 
@@ -1279,11 +1347,20 @@ class FSNamesystem implements FSConstants {
      */
      */
     public synchronized Block[] blocksToInvalidate(UTF8 sender) {
     public synchronized Block[] blocksToInvalidate(UTF8 sender) {
         Vector invalidateSet = (Vector) recentInvalidateSets.remove(sender);
         Vector invalidateSet = (Vector) recentInvalidateSets.remove(sender);
-        if (invalidateSet != null) {
-            return (Block[]) invalidateSet.toArray(new Block[invalidateSet.size()]);
-        } else {
+ 
+        if (invalidateSet == null ) 
             return null;
             return null;
+        
+        if(NameNode.stateChangeLog.isLoggable(Level.INFO)) {
+            StringBuffer blockList = new StringBuffer();
+            for( int i=0; i<invalidateSet.size(); i++ ) {
+                blockList.append(' ');
+                blockList.append(((Block)invalidateSet.elementAt(i)).getBlockName());
+            }
+            NameNode.stateChangeLog.info("BLOCK* NameSystem.blockToInvalidate: "
+                   +"ask "+sender+" to delete " + blockList );
         }
         }
+        return (Block[]) invalidateSet.toArray(new Block[invalidateSet.size()]);
     }
     }
 
 
     /**
     /**
@@ -1299,7 +1376,7 @@ class FSNamesystem implements FSConstants {
     public synchronized Object[] pendingTransfers(DatanodeInfo srcNode, int xmitsInProgress) {
     public synchronized Object[] pendingTransfers(DatanodeInfo srcNode, int xmitsInProgress) {
         synchronized (neededReplications) {
         synchronized (neededReplications) {
             Object results[] = null;
             Object results[] = null;
-	    int scheduledXfers = 0;
+            int scheduledXfers = 0;
 
 
             if (neededReplications.size() > 0) {
             if (neededReplications.size() > 0) {
                 //
                 //
@@ -1334,7 +1411,7 @@ class FSNamesystem implements FSConstants {
                                 // Build items to return
                                 // Build items to return
                                 replicateBlocks.add(block);
                                 replicateBlocks.add(block);
                                 replicateTargetSets.add(targets);
                                 replicateTargetSets.add(targets);
-				scheduledXfers += targets.length;
+                                scheduledXfers += targets.length;
                             }
                             }
                         }
                         }
                     }
                     }
@@ -1356,14 +1433,22 @@ class FSNamesystem implements FSConstants {
                         if (containingNodes.size() + targets.length >= dir.getFileByBlock(block).getReplication()) {
                         if (containingNodes.size() + targets.length >= dir.getFileByBlock(block).getReplication()) {
                             neededReplications.remove(block);
                             neededReplications.remove(block);
                             pendingReplications.add(block);
                             pendingReplications.add(block);
+                            NameNode.stateChangeLog.finer("BLOCK* NameSystem.pendingTransfer: "
+                                    +block.getBlockName()
+                                    +" is removed from neededReplications to pendingReplications" );
                         }
                         }
 
 
-                        LOG.info("Pending transfer (block " 
-                            + block.getBlockName() 
-                            + ") from " + srcNode.getName() 
-                            + " to " + targets[0].getName() 
-                            + (targets.length > 1 ? " and " + (targets.length-1) 
-                                + " more destination(s)" : "" ));
+                        if(NameNode.stateChangeLog.isLoggable(Level.INFO)) {
+                            StringBuffer targetList = new StringBuffer( "datanode(s)");
+                            for(int k=0; k<targets.length; k++) {
+                               targetList.append(' ');
+                               targetList.append(targets[k].getName());
+                            }
+                            NameNode.stateChangeLog.info("BLOCK* NameSystem.pendingTransfer: "
+                                    +"ask "+srcNode.getName()
+                                    +" to replicate "+block.getBlockName()
+                                    +" to "+targetList);
+                        }
                     }
                     }
 
 
                     //
                     //

+ 35 - 2
src/java/org/apache/hadoop/dfs/NameNode.java

@@ -57,6 +57,7 @@ import java.util.logging.*;
  **********************************************************/
  **********************************************************/
 public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
 public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     public static final Logger LOG = LogFormatter.getLogger("org.apache.hadoop.dfs.NameNode");
     public static final Logger LOG = LogFormatter.getLogger("org.apache.hadoop.dfs.NameNode");
+    public static final Logger stateChangeLog = LogFormatter.getLogger( "org.apache.hadoop.dfs.StateChange");
 
 
     private FSNamesystem namesystem;
     private FSNamesystem namesystem;
     private Server server;
     private Server server;
@@ -182,7 +183,9 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
                                boolean overwrite,
                                boolean overwrite,
                                short replication
                                short replication
     ) throws IOException {
     ) throws IOException {
-        Object results[] = namesystem.startFile(new UTF8(src), 
+       stateChangeLog.fine("*DIR* NameNode.create: file "
+            +src+" for "+clientName+" at "+clientMachine);
+       Object results[] = namesystem.startFile(new UTF8(src), 
                                                 new UTF8(clientName), 
                                                 new UTF8(clientName), 
                                                 new UTF8(clientMachine), 
                                                 new UTF8(clientMachine), 
                                                 overwrite,
                                                 overwrite,
@@ -202,6 +205,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
      */
      */
     public LocatedBlock addBlock(String src, 
     public LocatedBlock addBlock(String src, 
                                  String clientName) throws IOException {
                                  String clientName) throws IOException {
+        stateChangeLog.fine("*BLOCK* NameNode.addBlock: file "
+            +src+" for "+clientName);
         UTF8 src8 = new UTF8(src);
         UTF8 src8 = new UTF8(src);
         UTF8 client8 = new UTF8(clientName);
         UTF8 client8 = new UTF8(clientName);
         Object[] results = namesystem.getAdditionalBlock(src8, client8);
         Object[] results = namesystem.getAdditionalBlock(src8, client8);
@@ -216,8 +221,12 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
      * to prevent weird heartbeat race conditions.
      * to prevent weird heartbeat race conditions.
      */
      */
     public void reportWrittenBlock(LocatedBlock lb) throws IOException {
     public void reportWrittenBlock(LocatedBlock lb) throws IOException {
-        Block b = lb.getBlock();
+        Block b = lb.getBlock();        
         DatanodeInfo targets[] = lb.getLocations();
         DatanodeInfo targets[] = lb.getLocations();
+        stateChangeLog.fine("*BLOCK* NameNode.reportWrittenBlock"
+                +": " + b.getBlockName() +" is written to "
+                +targets.length + " locations" );
+
         for (int i = 0; i < targets.length; i++) {
         for (int i = 0; i < targets.length; i++) {
             namesystem.blockReceived(b, targets[i].getName());
             namesystem.blockReceived(b, targets[i].getName());
         }
         }
@@ -227,6 +236,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
      * The client needs to give up on the block.
      * The client needs to give up on the block.
      */
      */
     public void abandonBlock(Block b, String src) throws IOException {
     public void abandonBlock(Block b, String src) throws IOException {
+        stateChangeLog.fine("*BLOCK* NameNode.abandonBlock: "
+                +b.getBlockName()+" of file "+src );
         if (! namesystem.abandonBlock(b, new UTF8(src))) {
         if (! namesystem.abandonBlock(b, new UTF8(src))) {
             throw new IOException("Cannot abandon block during write to " + src);
             throw new IOException("Cannot abandon block during write to " + src);
         }
         }
@@ -235,11 +246,13 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
      */
      */
     public void abandonFileInProgress(String src, 
     public void abandonFileInProgress(String src, 
                                       String holder) throws IOException {
                                       String holder) throws IOException {
+        stateChangeLog.fine("*DIR* NameNode.abandonFileInProgress:" + src );
         namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
         namesystem.abandonFileInProgress(new UTF8(src), new UTF8(holder));
     }
     }
     /**
     /**
      */
      */
     public boolean complete(String src, String clientName) throws IOException {
     public boolean complete(String src, String clientName) throws IOException {
+        stateChangeLog.fine("*DIR* NameNode.complete: " + src + " for " + clientName );
         int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
         int returnCode = namesystem.completeFile(new UTF8(src), new UTF8(clientName));
         if (returnCode == STILL_WAITING) {
         if (returnCode == STILL_WAITING) {
             return false;
             return false;
@@ -269,12 +282,14 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     /**
     /**
      */
      */
     public boolean rename(String src, String dst) throws IOException {
     public boolean rename(String src, String dst) throws IOException {
+        stateChangeLog.fine("*DIR* NameNode.rename: " + src + " to " + dst );
         return namesystem.renameTo(new UTF8(src), new UTF8(dst));
         return namesystem.renameTo(new UTF8(src), new UTF8(dst));
     }
     }
 
 
     /**
     /**
      */
      */
     public boolean delete(String src) throws IOException {
     public boolean delete(String src) throws IOException {
+        stateChangeLog.fine("*DIR* NameNode.delete: " + src );
         return namesystem.delete(new UTF8(src));
         return namesystem.delete(new UTF8(src));
     }
     }
 
 
@@ -293,6 +308,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     /**
     /**
      */
      */
     public boolean mkdirs(String src) throws IOException {
     public boolean mkdirs(String src) throws IOException {
+        stateChangeLog.fine("*DIR* NameNode.mkdirs: " + src );
         return namesystem.mkdirs(new UTF8(src));
         return namesystem.mkdirs(new UTF8(src));
     }
     }
 
 
@@ -404,6 +420,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
     }
     }
 
 
     public Block[] blockReport(String sender, Block blocks[]) {
     public Block[] blockReport(String sender, Block blocks[]) {
+        stateChangeLog.fine("*BLOCK* NameNode.blockReport: "
+                +"from "+sender+" "+blocks.length+" blocks" );
         if( firstBlockReportTime==0)
         if( firstBlockReportTime==0)
               firstBlockReportTime=System.currentTimeMillis();
               firstBlockReportTime=System.currentTimeMillis();
 
 
@@ -411,6 +429,8 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
      }
      }
 
 
     public void blockReceived(String sender, Block blocks[]) {
     public void blockReceived(String sender, Block blocks[]) {
+        stateChangeLog.fine("*BLOCK* NameNode.blockReceived: "
+                +"from "+sender+" "+blocks.length+" blocks." );
         for (int i = 0; i < blocks.length; i++) {
         for (int i = 0; i < blocks.length; i++) {
             namesystem.blockReceived(blocks[i], new UTF8(sender));
             namesystem.blockReceived(blocks[i], new UTF8(sender));
         }
         }
@@ -441,6 +461,19 @@ public class NameNode implements ClientProtocol, DatanodeProtocol, FSConstants {
           System.err.println("Formatted "+dir);
           System.err.println("Formatted "+dir);
           System.exit(0);
           System.exit(0);
         }
         }
+            
+        LogFormatter.initFileHandler( conf, "namenode" );
+        LogFormatter.setShowThreadIDs(true);
+        String confLevel = conf.get("dfs.namenode.logging.level", "info");
+        Level level;
+        if( confLevel.equals( "dir"))
+                level=Level.FINE;
+        else if( confLevel.equals( "block"))
+                level=Level.FINER;
+        else if( confLevel.equals( "all"))
+                level=Level.FINEST;
+        else level=Level.INFO;
+        stateChangeLog.setLevel( level);
 
 
         NameNode namenode = new NameNode(conf);
         NameNode namenode = new NameNode(conf);
         namenode.join();
         namenode.join();

+ 3 - 1
src/java/org/apache/hadoop/mapred/JobTracker.java

@@ -1015,6 +1015,8 @@ public class JobTracker implements MRConstants, InterTrackerProtocol, JobSubmiss
           System.exit(-1);
           System.exit(-1);
         }
         }
 
 
-        startTracker(new Configuration());
+        Configuration conf=new Configuration();
+        LogFormatter.initFileHandler( conf, "jobtracker" );
+        startTracker(conf);
     }
     }
 }
 }

+ 3 - 1
src/java/org/apache/hadoop/mapred/TaskTracker.java

@@ -839,7 +839,9 @@ public class TaskTracker implements MRConstants, TaskUmbilicalProtocol, MapOutpu
             System.exit(-1);
             System.exit(-1);
         }
         }
 
 
-        TaskTracker tt = new TaskTracker(new JobConf());
+        JobConf conf=new JobConf();
+        LogFormatter.initFileHandler( conf, "tasktracker" );
+        TaskTracker tt = new TaskTracker(conf);
         tt.run();
         tt.run();
     }
     }
 }
 }

+ 66 - 3
src/java/org/apache/hadoop/util/LogFormatter.java

@@ -18,9 +18,12 @@ package org.apache.hadoop.util;
 
 
 import java.util.logging.*;
 import java.util.logging.*;
 import java.io.*;
 import java.io.*;
+import java.net.InetAddress;
 import java.text.*;
 import java.text.*;
 import java.util.Date;
 import java.util.Date;
 
 
+import org.apache.hadoop.conf.Configuration;
+
 /** Prints just the date and the log message. */
 /** Prints just the date and the log message. */
 
 
 public class LogFormatter extends Formatter {
 public class LogFormatter extends Formatter {
@@ -34,7 +37,7 @@ public class LogFormatter extends Formatter {
 
 
   private static boolean showTime = true;
   private static boolean showTime = true;
   private static boolean showThreadIDs = false;
   private static boolean showThreadIDs = false;
-
+  
   // install when this class is loaded
   // install when this class is loaded
   static {
   static {
     Handler[] handlers = LogFormatter.getLogger("").getHandlers();
     Handler[] handlers = LogFormatter.getLogger("").getHandlers();
@@ -44,6 +47,63 @@ public class LogFormatter extends Formatter {
     }
     }
   }
   }
 
 
+  public static String initFileHandler( Configuration conf, String opName )
+      throws IOException {
+          String logDir=System.getenv("HADOOP_LOG_DIR");
+          String userHome=System.getProperty("user.dir");
+          if( logDir==null ) {
+        	  logDir=System.getenv("HADOOP_HOME");
+        	  if(logDir==null) {
+        		  logDir=userHome;
+        	  } else {
+                  logDir+=File.separator+"logs";   
+              }
+          }
+          
+          if(!logDir.equals(userHome)) {
+              File logDirFile = new File( logDir );
+              if(!logDirFile.exists()) {
+                  if(!logDirFile.mkdirs()) {
+                      logDir=userHome;
+                  }
+              } else if( !logDirFile.isDirectory()) {
+                  logDir=userHome;
+              }
+          }
+          
+          String hostname;
+          try {
+          	hostname=InetAddress.getLocalHost().getHostName();
+          	int index=hostname.indexOf('.');
+          	if( index != -1 ) {
+          		hostname=hostname.substring(0, index);
+          	}
+          } catch (java.net.UnknownHostException e) {
+          	hostname="localhost";
+          }
+          
+          String logFile = logDir+File.separator+"hadoop-"+System.getProperty( "user.name" )
+               +"-"+opName+"-"+hostname+".log";
+
+          int logFileSize = conf.getInt( "hadoop.logfile.size", 10000000 );
+          int logFileCount = conf.getInt( "hadoop.logfile.count", 10 );
+          
+          FileHandler fh=new FileHandler(logFile, logFileSize, logFileCount, false);
+          fh.setFormatter(new LogFormatter());
+          fh.setLevel(Level.FINEST);
+          
+          Logger rootLogger = LogFormatter.getLogger("");
+          rootLogger.info( "directing logs to directory "+logDir );
+          
+          Handler[] handlers = rootLogger.getHandlers();
+          for( int i=0; i<handlers.length; i++ ) {
+          	rootLogger.removeHandler( handlers[i]);
+          }
+          rootLogger.addHandler(fh);
+          
+          return logFile;
+  }
+      
   /** Gets a logger and, as a side effect, installs this as the default
   /** Gets a logger and, as a side effect, installs this as the default
    * formatter. */
    * formatter. */
   public static Logger getLogger(String name) {
   public static Logger getLogger(String name) {
@@ -77,8 +137,11 @@ public class LogFormatter extends Formatter {
     
     
     // the thread id
     // the thread id
     if (showThreadIDs) {
     if (showThreadIDs) {
-      buffer.append(" ");
-      buffer.append(record.getThreadID());
+      buffer.append(" 0x");
+      String threadID = Integer.toHexString(record.getThreadID());
+      for (int i = 0; i < 8 - threadID.length(); i++) 
+        buffer.append('0');
+      buffer.append(threadID);
     }
     }
 
 
     // handle SEVERE specially
     // handle SEVERE specially

+ 467 - 0
src/test/org/apache/hadoop/dfs/ClusterTestDFSNamespaceLogging.java

@@ -0,0 +1,467 @@
+/**
+ * Copyright 2005 The Apache Software Foundation
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.dfs;
+
+import junit.framework.TestCase;
+import junit.framework.AssertionFailedError;
+import org.apache.hadoop.fs.FSOutputStream;
+import org.apache.hadoop.fs.FileUtil;
+import org.apache.hadoop.io.UTF8;
+import org.apache.hadoop.util.LogFormatter;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.dfs.NameNode;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileNotFoundException;
+import java.io.FileReader;
+import java.io.IOException;
+import java.net.InetSocketAddress;
+import java.util.ArrayList;
+import java.util.logging.Level;
+import java.util.logging.Logger;
+
+/**
+ * Test DFS logging
+ * make sure that any namespace mutations are logged.
+ * @author Hairong Kuang
+ */
+public class ClusterTestDFSNamespaceLogging extends TestCase implements FSConstants {
+  /** Test logger, named after this class.  (The original name was a
+   *  copy-paste leftover from ClusterTestDFS.) */
+  private static final Logger LOG =
+      LogFormatter.getLogger("org.apache.hadoop.dfs.ClusterTestDFSNamespaceLogging");
+
+  private static Configuration conf = new Configuration();
+
+  /**
+   * all DFS test files go under this base directory
+   */
+  private static String baseDirSpecified=conf.get("test.dfs.data", "/tmp/test-dfs");
+
+  /**
+   * base dir as File
+   */
+  private static File baseDir=new File(baseDirSpecified);
+  
+  /**
+   * name node port
+   */
+  int nameNodePort = conf.getInt("dfs.namenode.port", 9020);
+  
+  /** DFS client, datanodes, and namenode
+   */
+  DFSClient dfsClient;
+  ArrayList dataNodeDaemons = new ArrayList();
+  NameNode nameNodeDaemon;
+  
+  /** Log header length: number of characters preceding the message text on
+   *  a log line; directory ops and block ops use different header widths.
+   */
+  private static final int DIR_LOG_HEADER_LEN = 30;
+  private static final int BLOCK_LOG_HEADER_LEN = 32;
+  /** DFS block size
+   */
+  private static final int BLOCK_SIZE = 32*1000*1000;
+  
+  /** Buffer size
+   */
+  private static final int BUFFER_SIZE = 4096;
+
+  /** Reader over the namenode's log file, and that file's path. */
+  private BufferedReader logfh;
+  private String logFile;
+  
+  /** Per-test setup: allow all block replicas on one host so a
+   *  single-machine pseudo-cluster can satisfy the replication factor. */
+  protected void setUp() throws Exception {
+    super.setUp();
+    conf.setBoolean("test.dfs.same.host.targets.allowed", true);
+  }
+
+ /**
+  * Remove old files from temp area used by this test case and be sure
+  * base temp directory can be created.
+  */
+  protected void prepareTempFileSpace() {
+    if (baseDir.exists()) {
+      try { // start from a blank state; deletion is best-effort
+        FileUtil.fullyDelete(baseDir);
+      } catch (Exception ignored) {
+      }
+    }
+    baseDir.mkdirs();
+    if (!baseDir.isDirectory()) {
+      // note the trailing space after "property" so the message reads correctly
+      throw new RuntimeException("Value of root directory property "
+          + "test.dfs.data for dfs test is not a directory: "
+          + baseDirSpecified);
+    }
+  }
+
+  /**
+   * Pseudo Distributed FS Test.
+   * Test DFS by running all the necessary daemons in one process.
+   *
+   * @throws Exception
+   */
+  public void testFsPseudoDistributed() throws Exception {
+    final int datanodeCount = 3;  // a small cluster: three data nodes
+    testFsPseudoDistributed(datanodeCount);
+  }
+  
+  /** Drive the full scenario: start a pseudo-distributed cluster, perform
+   *  namespace mutations (mkdirs, create, delete, rename) in both succeeding
+   *  and failing variants, and assert that each one was recorded in the
+   *  namenode log.  The assert* helpers scan the log strictly forward, so
+   *  the order of operations here must match the order they are logged.
+   *  @param datanodeNum number of in-process datanodes to start
+   */
+  private void testFsPseudoDistributed( int datanodeNum ) throws Exception {
+    try {
+      prepareTempFileSpace();
+
+      configureDFS();
+      startDFS(datanodeNum);
+
+      // open the namenode log for the assertHasLogged scans
+      // NOTE(review): logfh is never closed -- relies on process exit
+      if( logfh == null )
+        try {
+          logfh = new BufferedReader( new FileReader( logFile ) );
+        } catch (FileNotFoundException e) {
+          // the file handler installed in configureDFS should have created it
+          throw new AssertionFailedError("Log file does not exist: "+logFile);
+        }
+    
+      // create a directory
+      try {
+        dfsClient.mkdirs( new UTF8( "/data") );
+        assertMkdirs( "/data", false );
+      } catch ( IOException ioe ) {
+      	ioe.printStackTrace();
+      }
+       
+      // a relative path is rejected: expect the "failed" log variant
+      try {
+        dfsClient.mkdirs( new UTF8( "data") );
+        assertMkdirs( "data", true );
+      } catch ( IOException ioe ) {
+       	ioe.printStackTrace();
+      }
+      
+      //
+      // create a file with 1 data block
+      try {
+        createFile("/data/xx", 1);
+        assertCreate( "/data/xx", 1, false );
+      } catch( IOException ioe ) {
+    	assertCreate( "/data/xx", 1, true );
+      }
+    
+      // create a file with 2 data blocks
+      try {
+        createFile("/data/yy",BLOCK_SIZE+1);
+        assertCreate( "/data/yy", BLOCK_SIZE+1, false );
+      } catch( IOException ioe ) {
+    	assertCreate( "/data/yy", BLOCK_SIZE+1, true );
+      }
+
+      // create an existing file
+      try {
+        createFile("/data/xx", 2);
+        assertCreate( "/data/xx", 2, false );
+      } catch( IOException ioe ) {
+      	assertCreate( "/data/xx", 2, true );
+      }
+    
+      // delete the file
+      try {
+        dfsClient.delete( new UTF8("/data/yy") );
+        assertDelete("/data/yy", false);
+      } catch( IOException ioe ) {
+        ioe.printStackTrace();
+      }
+
+    
+      // rename the file
+      try {
+        dfsClient.rename( new UTF8("/data/xx"), new UTF8("/data/yy") );
+        assertRename( "/data/xx", "/data/yy", false );
+      } catch( IOException ioe ) {
+      	ioe.printStackTrace();
+      }
+
+      // delete a file that no longer exists: expect the "failed" log variant
+      try {
+        dfsClient.delete(new UTF8("/data/xx"));
+        assertDelete("/data/xx", true);
+      } catch(IOException ioe) {
+    	ioe.printStackTrace();
+      }
+      
+      // rename a non-existent file: expect the "failed" log variant
+      try {
+        dfsClient.rename( new UTF8("/data/xx"), new UTF8("/data/yy") );    
+        assertRename( "/data/xx", "/data/yy", true );
+      } catch( IOException ioe) {
+    	ioe.printStackTrace();
+      }
+        
+    } catch (AssertionFailedError afe) {
+      afe.printStackTrace();
+      throw afe;
+    } catch (Throwable t) {
+      // non-assertion failures are reported but do not fail the test here
+      msg("Unexpected exception_a: " + t);
+      t.printStackTrace();
+    } finally {
+      shutdownDFS();
+
+    }
+  }
+
+  /** Write exactly <code>fileSize</code> bytes of 'a' characters to
+   *  <code>filename</code> through the DFS client.
+   *  @param filename DFS path to create
+   *  @param fileSize number of bytes to write
+   */
+  private void createFile( String filename, long fileSize ) throws IOException { 
+    byte[] buffer = new byte[BUFFER_SIZE];
+    UTF8 testFileName = new UTF8(filename);
+    FSOutputStream nos = dfsClient.create(testFileName, false);
+    try {
+      for (long nBytesWritten = 0L;
+                nBytesWritten < fileSize;
+                nBytesWritten += buffer.length) {
+        if ((nBytesWritten + buffer.length) > fileSize) {
+          // last, partial buffer: write only the bytes that remain
+          int pb = (int) (fileSize - nBytesWritten);
+          byte[] bufferPartial = new byte[pb];
+          for( int i=0; i<pb; i++) {
+            bufferPartial[i]='a';
+          }
+          // bug fix: was nos.write(buffer), which wrote a full buffer and
+          // overshot fileSize on the final iteration
+          nos.write(bufferPartial);
+        } else {
+          for( int i=0; i<buffer.length;i++) {
+            buffer[i]='a';
+          }
+          nos.write(buffer);
+        }
+      }
+    } finally {
+      nos.flush();
+      nos.close();
+    }
+  }
+
+  private void assertMkdirs( String fileName, boolean failed ) {
+	  assertHasLogged("NameNode.mkdirs: " +fileName, DIR_LOG_HEADER_LEN+1);
+	  assertHasLogged("NameSystem.mkdirs: "+fileName, DIR_LOG_HEADER_LEN);
+	  if( failed )
+		assertHasLogged("FSDirectory.mkdirs: "
+        			+"failed to create directory "+fileName, DIR_LOG_HEADER_LEN);
+	  else
+	    assertHasLogged( "FSDirectory.mkdirs: created directory "+fileName, DIR_LOG_HEADER_LEN);
+  }
+  
+  /** Verify that a file-create request was logged at every layer, including
+   *  the per-block allocation messages when the create succeeded. */
+  private void assertCreate( String fileName, int filesize, boolean failed ) {
+	  assertHasLogged("NameNode.create: file "+fileName, DIR_LOG_HEADER_LEN+1);
+	  assertHasLogged("NameSystem.startFile: file "+fileName, DIR_LOG_HEADER_LEN);
+	  if( failed ) {
+		assertHasLogged("NameSystem.startFile: "
+            		  +"failed to create file " + fileName, DIR_LOG_HEADER_LEN);
+	  } else {
+	    assertHasLogged("NameSystem.allocateBlock: "+fileName, BLOCK_LOG_HEADER_LEN);
+	    // ceiling division: number of blocks needed to hold filesize bytes
+	    int blockNum = (filesize/BLOCK_SIZE*BLOCK_SIZE==filesize)?
+		  filesize/BLOCK_SIZE : 1+filesize/BLOCK_SIZE;
+	    // the first block's allocation was asserted above; every additional
+	    // block contributes addBlock/getAdditionalBlock/allocateBlock lines
+	    for( int i=1; i<blockNum; i++) {
+		  assertHasLogged("NameNode.addBlock: file "+fileName, BLOCK_LOG_HEADER_LEN+1);
+		  assertHasLogged("NameSystem.getAdditionalBlock: file "+fileName, BLOCK_LOG_HEADER_LEN);
+		  assertHasLogged("NameSystem.allocateBlock: "+fileName, BLOCK_LOG_HEADER_LEN);
+	    }
+	    assertHasLogged("NameNode.complete: "+fileName, DIR_LOG_HEADER_LEN+1);
+	    assertHasLogged("NameSystem.completeFile: "+fileName, DIR_LOG_HEADER_LEN);
+	    assertHasLogged("FSDirectory.addFile: "+fileName+" with "
+			  +blockNum+" blocks is added to the file system", DIR_LOG_HEADER_LEN);
+	    assertHasLogged("NameSystem.completeFile: "+fileName
+			  +" is removed from pendingCreates", DIR_LOG_HEADER_LEN);
+	  }
+  }
+  
+  private void assertDelete( String fileName, boolean failed ) {
+	  assertHasLogged("NameNode.delete: "+fileName, DIR_LOG_HEADER_LEN+1);
+      assertHasLogged("NameSystem.delete: "+fileName, DIR_LOG_HEADER_LEN);
+      assertHasLogged("FSDirectory.delete: "+fileName, DIR_LOG_HEADER_LEN);
+      if( failed )
+        assertHasLogged("FSDirectory.unprotectedDelete: "
+            +"failed to remove "+fileName, DIR_LOG_HEADER_LEN );
+      else
+        assertHasLogged("FSDirectory.unprotectedDelete: "
+            +fileName+" is removed", DIR_LOG_HEADER_LEN);
+  }
+  
+  private void assertRename( String src, String dst, boolean failed ) {
+	  assertHasLogged("NameNode.rename: "+src+" to "+dst, DIR_LOG_HEADER_LEN+1);
+	  assertHasLogged("NameSystem.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN );
+	  assertHasLogged("FSDirectory.renameTo: "+src+" to "+dst, DIR_LOG_HEADER_LEN );
+	  if( failed )
+		assertHasLogged("FSDirectory.unprotectedRenameTo: "
+                         +"failed to rename "+src+" to "+dst, DIR_LOG_HEADER_LEN);
+	  else
+	    assertHasLogged("FSDirectory.unprotectedRenameTo: "
+                       +src+" is renamed to "+dst, DIR_LOG_HEADER_LEN );
+  }
+  
+  /** Scan forward in the namenode log for a line whose text, after the
+   *  fixed-width header, starts with <code>target</code>.  The reader is
+   *  left positioned just past the matching line; fails the test if the
+   *  message is never found or the log cannot be read. */
+  private void assertHasLogged( String target, int headerLen ) {
+    boolean found = false;
+    try {
+      String line;
+      while( !found && (line = logfh.readLine()) != null ) {
+        if( line.length() > headerLen && line.startsWith(target, headerLen) ) {
+          found = true;
+        }
+      }
+    } catch(java.io.IOException e) {
+      throw new AssertionFailedError("error reading the log file");
+    }
+    if( !found ) {
+      throw new AssertionFailedError(target+" not logged");
+    }
+  }
+
+  /**
+   * Modify the shared config for this test: set the DFS block size, shrink
+   * cluster resource usage, and install a file handler for the namenode log
+   * with FINEST-level state-change logging enabled.
+   */
+  private void configureDFS() throws IOException {
+    // set given config param to override other config settings
+    conf.setInt("test.dfs.block_size", BLOCK_SIZE);
+    // verify that config changed (2 is an intentional obviously-wrong default)
+    assertTrue(BLOCK_SIZE == conf.getInt("test.dfs.block_size", 2));
+    // downsize for testing (just to save resources)
+    conf.setInt("dfs.namenode.handler.count", 3);
+    conf.setLong("dfs.blockreport.intervalMsec", 50*1000L);
+    conf.setLong("dfs.datanode.startupMsec", 15*1000L);
+    conf.setInt("dfs.replication", 2);
+    // one large, unrolled log file so assertHasLogged's forward scan never
+    // crosses a log-roll boundary
+    conf.setInt("hadoop.logfile.count", 1);
+    conf.setInt("hadoop.logfile.size", 1000000000);
+
+    // logging configuration for namenode
+    logFile = LogFormatter.initFileHandler( conf, "namenode" );
+    LogFormatter.setShowThreadIDs(true);
+    NameNode.stateChangeLog.setLevel( Level.FINEST);
+  }
+  
+  /** Format and start a NameNode plus <code>dataNodeNum</code> in-process
+   *  DataNodes, wait for the datanodes to report in, then open a DFSClient
+   *  against the namenode as if it were a remote process.
+   *  @param dataNodeNum number of datanode daemons to launch
+   */
+  private void startDFS( int dataNodeNum) throws IOException {
+    //
+    //          start a NameNode
+    String nameNodeSocketAddr = "localhost:" + nameNodePort;
+    conf.set("fs.default.name", nameNodeSocketAddr);
+    
+    String nameFSDir = baseDirSpecified + "/name";
+	conf.set("dfs.name.dir", nameFSDir);
+	
+    NameNode.format(conf);
+    
+    nameNodeDaemon = new NameNode(new File(nameFSDir), nameNodePort, conf);
+
+     //
+      //        start DataNodes
+      //
+      for (int i = 0; i < dataNodeNum; i++) {
+        // uniquely config real fs path for data storage for this datanode
+        String dataDir = baseDirSpecified + "/datanode" + i;
+        conf.set("dfs.data.dir", dataDir);
+        DataNode dn = DataNode.makeInstanceForDir(dataDir, conf);
+        if (dn != null) {
+          dataNodeDaemons.add(dn);
+          (new Thread(dn, "DataNode" + i + ": " + dataDir)).start();
+        }
+      }
+	         
+      assertTrue("incorrect datanodes for test to continue",
+            (dataNodeDaemons.size() == dataNodeNum));
+      //
+      //          wait for datanodes to report in
+      try {
+        awaitQuiescence();
+      } catch( InterruptedException e) {
+    	  e.printStackTrace();
+      }
+      
+      //  act as if namenode is a remote process
+      dfsClient = new DFSClient(new InetSocketAddress("localhost", nameNodePort), conf);
+  }
+
+  /** Shut down the client, then all datanodes, then the namenode -- in that
+   *  order -- ignoring individual failures so every daemon gets a shutdown
+   *  attempt. */
+  private void shutdownDFS() {
+      // shutdown client
+      if (dfsClient != null) {
+        try {
+          msg("close down subthreads of DFSClient");
+          dfsClient.close();
+        } catch (Exception ignored) { }
+        msg("finished close down of DFSClient");
+      }
+
+      //
+      // shut down datanode daemons (this takes advantage of being same-process)
+      msg("begin shutdown of all datanode daemons" );
+
+      for (int i = 0; i < dataNodeDaemons.size(); i++) {
+        DataNode dataNode = (DataNode) dataNodeDaemons.get(i);
+        try {
+          dataNode.shutdown();
+        } catch (Exception e) {
+           msg("ignoring exception during (all) datanode shutdown, e=" + e);
+        }
+      }
+      msg("finished shutdown of all datanode daemons");
+      
+      // shutdown namenode
+      msg("begin shutdown of namenode daemon");
+      try {
+        nameNodeDaemon.stop();
+      } catch (Exception e) {
+        msg("ignoring namenode shutdown exception=" + e);
+      }
+      msg("finished shutdown of namenode daemon");
+  }
+  
+  /** Wait for the DFS datanodes to become quiescent.
+   * The initial implementation is to sleep for some fixed amount of time,
+   * but a better implementation would be to really detect when distributed
+   * operations are completed.
+   * @throws InterruptedException
+   */
+  private void awaitQuiescence() throws InterruptedException {
+    // ToDo: Need observer pattern, not static sleep
+    // Doug suggested that the block report interval could be made shorter
+    //   and then observing that would be a good way to know when an operation
+    //   was complete (quiescence detect).
+    sleepAtLeast(30000);
+  }
+
+  /** Route test progress messages through the test logger. */
+  private void msg(String s) {
+    LOG.info(s);
+  }
+
+  /** Sleep until at least <code>tmsec</code> milliseconds of wall time have
+   *  elapsed, resuming the sleep after any interruption.  Interrupts are
+   *  deliberately swallowed: the caller is guaranteed the full delay. */
+  public static void sleepAtLeast(int tmsec) {
+    final long deadline = System.currentTimeMillis() + tmsec;
+    long now = System.currentTimeMillis();
+    while (now < deadline) {
+      try {
+        Thread.sleep(deadline - now);
+      } catch (InterruptedException ignored) {
+        // fall through and re-check the remaining time
+      }
+      now = System.currentTimeMillis();
+    }
+  }
+
+  /** Command-line entry point: runs this test under the JUnit text runner. */
+  public static void main(String[] args) throws Exception {
+    String usage = "Usage: ClusterTestDFSNamespaceLogging (no args)";
+    if (args.length != 0) {
+      System.err.println(usage);
+      System.exit(-1);
+    }
+    // bug fix: was "ClusterTestDFSNameSpaceChangeLogging", a class that does
+    // not exist; the runner must be handed this class's actual name
+    String[] testargs = {"org.apache.hadoop.dfs.ClusterTestDFSNamespaceLogging"};
+    junit.textui.TestRunner.main(testargs);
+  }
+
+}