Clear tasktracker's file cache before it re-initializes, to avoid confusion. Contributed by Owen.

git-svn-id: https://svn.apache.org/repos/asf/lucene/hadoop/trunk@549200 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 18 years ago
parent
commit
cb070e9ff7

+ 3 - 0
CHANGES.txt

@@ -182,6 +182,9 @@ Trunk (unreleased changes)
  56. HADOOP-1207.  Fix FsShell's 'rm' command to not stop when one of
      the named files does not exist.  (Tsz Wo Sze via cutting)
 
+ 57. HADOOP-1475.  Clear tasktracker's file cache before it
+     re-initializes, to avoid confusion.  (omalley via cutting)
+
 
 Release 0.13.0 - 2007-06-08
 

+ 20 - 0
src/java/org/apache/hadoop/filecache/DistributedCache.java

@@ -40,6 +40,8 @@ public class DistributedCache {
   private static TreeMap<String, CacheStatus> cachedArchives = new TreeMap<String, CacheStatus>();
   // buffer size for reading checksum files
   private static final int CRC_BUFFER_SIZE = 64 * 1024;
+  private static final Log LOG =
+    LogFactory.getLog(DistributedCache.class);
   
   /**
    * 
@@ -679,4 +681,22 @@ public class DistributedCache {
     public byte[] md5;
   }
 
+  /**
+   * Clear the entire contents of the cache and delete the backing files. This
+   * should only be used when the server is reinitializing, because the users
+   * are going to lose their files.
+   */
+  public static void purgeCache(Configuration conf) throws IOException {
+    synchronized (cachedArchives) {
+      FileSystem localFs = FileSystem.getLocal(conf);
+      for (Map.Entry<String,CacheStatus> f: cachedArchives.entrySet()) {
+        try {
+          localFs.delete(f.getValue().localLoadPath);
+        } catch (IOException ie) {
+          LOG.debug("Error cleaning up cache", ie);
+        }
+      }
+      cachedArchives.clear();
+    }
+  }
 }
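
The new purgeCache method walks every cached entry under the cachedArchives lock, deletes its localized files, and swallows per-entry IOExceptions (logging them at debug level) so one failed delete does not abort the sweep. Below is a minimal usage sketch, assuming only that the caller has a Hadoop Configuration in hand; the wrapper class and method names are illustrative and not part of this change.

// Illustrative sketch only: a daemon wiping the shared file cache before it
// rebuilds its state, using the purgeCache method added above.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;

public class CachePurgeSketch {
  public static void reinitialize(Configuration conf) throws IOException {
    // Drops every cached archive and its on-disk copy; anything users had
    // localized through the cache disappears, so this belongs only in a
    // server re-initialization path.
    DistributedCache.purgeCache(conf);
  }
}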

+ 2 - 0
src/java/org/apache/hadoop/mapred/TaskTracker.java

@@ -45,6 +45,7 @@ import javax.servlet.http.HttpServletResponse;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.filecache.DistributedCache;
 import org.apache.hadoop.fs.DF;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FSError;
@@ -349,6 +350,7 @@ public class TaskTracker
     LOG.info("Starting tracker " + taskTrackerName);
 
     // Clear out temporary files that might be lying around
+    DistributedCache.purgeCache(this.fConf);
     this.mapOutputFile.cleanupStorage();
     this.justStarted = true;
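
For context, a sketch of the ordering this change establishes in the tracker's (re)initialization path: purge the distributed cache first, then run the pre-existing map-output cleanup. Apart from DistributedCache.purgeCache, the names below are placeholders; the real code lives in TaskTracker.initialize() as shown in the diff above.

// Illustrative ordering only: clear the shared file cache, then the per-task
// scratch space, so no stale local files survive a tracker restart.
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.filecache.DistributedCache;

public class TrackerCleanupSketch {
  public static void clearLocalLeftovers(Configuration conf,
                                         Runnable mapOutputCleanup)
      throws IOException {
    DistributedCache.purgeCache(conf);  // new call added by this change
    mapOutputCleanup.run();             // stands in for mapOutputFile.cleanupStorage()
  }
}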