Browse Source

HDFS-3940. Add GSet#clear method and clear the block map when the namenode is shut down. Contributed by Suresh Srinivas.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1@1468891 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas 12 years ago
parent
commit
d2cdb096fc

+ 3 - 0
CHANGES.txt

@@ -239,6 +239,9 @@ Release 1.2.0 - unreleased
     HDFS-4635. Move BlockManager#computeCapacity to LightWeightGSet.
     (suresh)
 
+    HDFS-3940. Add Gset#clear method and clear the block map when 
+    namenode is shutdown. (suresh)
+
   BUG FIXES
 
     HADOOP-9467. Metrics2 record filter should check name as well as tags.

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/namenode/BlocksMap.java

@@ -321,7 +321,7 @@ class BlocksMap {
   }
 
   void close() {
-    blocks = null;
+    blocks.clear();
   }
 
   /**

+ 4 - 0
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSDirectory.java

@@ -1500,4 +1500,8 @@ class FSDirectory implements FSConstants, Closeable {
       inode.setLocalName(name.getBytes());
     }
   }
+  
+  void shutdown() {
+    nameCache.reset();
+  }
 }

+ 7 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -5827,10 +5827,15 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
    * shutdown FSNamesystem
    */
   public void shutdown() {
-    if (mbeanName != null)
+    if (mbeanName != null) {
       MBeans.unregister(mbeanName);
-    if (mxBean != null)
+    }
+    if (mxBean != null) {
       MBeans.unregister(mxBean);
+    }
+    if (dir != null) {
+      dir.shutdown();
+    }
   }
   
 

+ 10 - 0
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameCache.java

@@ -152,4 +152,14 @@ class NameCache<K> {
     cache.put(name, name);
     lookups += useThreshold;
   }
+  
+  public void reset() {
+    initialized = false;
+    cache.clear();
+    if (transientMap == null) {
+      transientMap = new HashMap<K, UseCount>();
+    } else {
+      transientMap.clear();
+    }
+  }
 }

+ 3 - 1
src/hdfs/org/apache/hadoop/hdfs/util/GSet.java

@@ -78,4 +78,6 @@ public interface GSet<K, E extends K> extends Iterable<E> {
     * @throws NullPointerException if key == null.
   */
   E remove(K key);
-}
+
+  void clear();
+}

+ 5 - 0
src/hdfs/org/apache/hadoop/hdfs/util/GSetByHashMap.java

@@ -62,4 +62,9 @@ public class GSetByHashMap<K, E extends K> implements GSet<K, E> {
   public Iterator<E> iterator() {
     return m.values().iterator();
   }
+  
+  @Override
+  public void clear() {
+    m.clear();
+  }
 }

+ 11 - 6
src/hdfs/org/apache/hadoop/hdfs/util/LightWeightGSet.java

@@ -321,12 +321,17 @@ public class LightWeightGSet<K, E extends K> implements GSet<K, E> {
     final int exponent = e2 < 0? 0: e2 > 30? 30: e2;
     final int c = 1 << exponent;
 
-    if (LightWeightGSet.LOG.isDebugEnabled()) {
-      LOG.debug("Computing capacity for map " + mapName);
-      LOG.debug("VM type       = " + vmBit + "-bit");
-      LOG.debug(percentage + "% max memory = " + maxMemory);
-      LOG.debug("capacity      = 2^" + exponent + " = " + c + " entries");
-    }
+    LOG.info("Computing capacity for map " + mapName);
+    LOG.info("VM type       = " + vmBit + "-bit");
+    LOG.info(percentage + "% max memory = " + maxMemory);
+    LOG.info("capacity      = 2^" + exponent + " = " + c + " entries");
     return c;
   }
+  
+  public void clear() {
+    for (int i = 0; i < entries.length; i++) {
+      entries[i] = null;
+    }
+    size = 0;
+  }
 }

+ 5 - 0
src/test/org/apache/hadoop/hdfs/util/TestGSet.java

@@ -386,6 +386,11 @@ public class TestGSet {
       return String.format(" iterate=%5d, contain=%5d, time elapsed=%5d.%03ds",
           iterate_count, contain_count, t/1000, t%1000);
     }
+
+    @Override
+    public void clear() {
+      gset.clear();
+    }
   }
 
   /** Test data set */