Parcourir la source

MAPREDUCE-5351. Fixed a memory leak in JobTracker due to stable FS objects in FSCache. Contributed by Sandy Ryza.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1@1497963 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy il y a 12 ans
Parent
commit
7c318955e9
2 fichiers modifiés avec 53 ajouts et 1 suppression
  1. 1 1
      CHANGES.txt
  2. 52 0
      src/test/org/apache/hadoop/mapred/TestCleanupQueue.java

+ 1 - 1
CHANGES.txt

@@ -105,7 +105,7 @@ Release 1.2.1 - Unreleased
     HADOOP-9665. Fixed BlockDecompressorStream#decompress to return -1 rather
     than throw EOF at end of file. (Zhijie Shen via acmurthy)
 
-    HADOOP-5351. Fixed a memory leak in JobTracker due to stable FS objects in
+    MAPREDUCE-5351. Fixed a memory leak in JobTracker due to stable FS objects in
     FSCache. (Sandy Ryza via acmurthy)
 
 Release 1.2.0 - 2013.05.05

+ 52 - 0
src/test/org/apache/hadoop/mapred/TestCleanupQueue.java

@@ -0,0 +1,52 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.mapred;
+
+import java.io.File;
+import java.io.IOException;
+
+import junit.framework.Assert;
+
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.mapred.CleanupQueue.PathDeletionContext;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.junit.Test;
+
+// Verifies that CleanupQueue closes the FileSystem instance it obtained for
+// path deletion, so FS objects do not accumulate in FileSystem's static
+// cache (the JobTracker memory leak addressed by MAPREDUCE-5351).
+public class TestCleanupQueue {
+  // The test only passes if the FS cache drains to 0 before the 2s timeout;
+  // a timeout here indicates CleanupQueue left a cached FileSystem open.
+  @Test (timeout = 2000)
+  public void testCleanupQueueClosesFilesystem() throws IOException,
+      InterruptedException {
+    // NOTE(review): afile.txt is created in the current working directory
+    // and is expected to be removed by the CleanupQueue below; if deletion
+    // fails, the file is left behind -- consider file.deleteOnExit() as a
+    // safety net.
+    File file = new File("afile.txt");
+    file.createNewFile();
+    Path path = new Path(file.getAbsoluteFile().toURI());
+    
+    // Prime the FS cache with exactly one entry for the default FileSystem.
+    FileSystem.get(new Configuration());
+    Assert.assertEquals(1, FileSystem.getCacheSize());
+    
+    // Queue the path for deletion under the login user; after the deletion
+    // completes, CleanupQueue is expected to close the FileSystem it used,
+    // which should evict it from the cache.
+    CleanupQueue cleanupQueue = new CleanupQueue();
+    PathDeletionContext context = new PathDeletionContext(path,
+        new Configuration(), UserGroupInformation.getLoginUser());
+    cleanupQueue.addToQueue(context);
+    
+    // Poll until the cache is empty; the @Test timeout bounds this wait.
+    while (FileSystem.getCacheSize() > 0) {
+      Thread.sleep(100);
+    }
+  }
+}