
Rename NNStorageArchivalManager to NNStorageRetentionManager per Matt Foley's comments

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1151670 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon, 13 years ago
Commit 5ecb57e484

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/BackupJournalManager.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 
 /**

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSEditLog.java

@@ -36,7 +36,7 @@ import org.apache.hadoop.hdfs.server.common.HdfsConstants.NamenodeRole;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import static org.apache.hadoop.hdfs.server.common.Util.now;
 import org.apache.hadoop.hdfs.server.namenode.NNStorage.NameNodeDirType;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.RemoteEditLogManifest;

+ 2 - 2
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FSImage.java

@@ -91,7 +91,7 @@ public class FSImage implements Closeable {
 
   final private Configuration conf;
 
-  private final NNStorageArchivalManager archivalManager; 
+  private final NNStorageRetentionManager archivalManager; 
 
   /**
    * Construct an FSImage.
@@ -149,7 +149,7 @@ public class FSImage implements Closeable {
     this.editLog = new FSEditLog(storage);
     setFSNamesystem(ns);
     
-    archivalManager = new NNStorageArchivalManager(conf, storage, editLog);
+    archivalManager = new NNStorageRetentionManager(conf, storage, editLog);
   }
 
   protected FSNamesystem getFSNamesystem() {

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/FileJournalManager.java

@@ -27,7 +27,7 @@ import java.util.List;
 import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
 import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;

+ 1 - 1
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/JournalManager.java

@@ -19,7 +19,7 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.IOException;
 
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 
 /**
  * A JournalManager is responsible for managing a single place of storing

+ 6 - 5
hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageArchivalManager.java → hdfs/src/java/org/apache/hadoop/hdfs/server/namenode/NNStorageRetentionManager.java

@@ -35,7 +35,7 @@ import com.google.common.collect.Lists;
 import com.google.common.collect.Sets;
 
 /**
- * The NNStorageArchivalManager is responsible for inspecting the storage
+ * The NNStorageRetentionManager is responsible for inspecting the storage
  * directories of the NN and enforcing a retention policy on checkpoints
  * and edit logs.
  * 
@@ -43,15 +43,16 @@ import com.google.common.collect.Sets;
  * implementation, which might delete the files or instead copy them to
  * a filer or HDFS for later analysis.
  */
-public class NNStorageArchivalManager {
+public class NNStorageRetentionManager {
   
   private final int numCheckpointsToRetain;
-  private static final Log LOG = LogFactory.getLog(NNStorageArchivalManager.class);
+  private static final Log LOG = LogFactory.getLog(
+      NNStorageRetentionManager.class);
   private final NNStorage storage;
   private final StoragePurger purger;
   private final FSEditLog editLog;
   
-  public NNStorageArchivalManager(
+  public NNStorageRetentionManager(
       Configuration conf,
       NNStorage storage,
       FSEditLog editLog,
@@ -64,7 +65,7 @@ public class NNStorageArchivalManager {
     this.purger = purger;
   }
   
-  public NNStorageArchivalManager(Configuration conf, NNStorage storage,
+  public NNStorageRetentionManager(Configuration conf, NNStorage storage,
       FSEditLog editLog) {
     this(conf, storage, editLog, new DeletionStoragePurger());
   }
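
The class javadoc above notes that the purger implementation is pluggable: it "might delete the files or instead copy them to a filer or HDFS for later analysis." A minimal sketch of such an alternative purger follows; the purgeImage/purgeLog method names and the Found* parameter types are assumptions inferred from the test changes further down, not something this diff confirms.

package org.apache.hadoop.hdfs.server.namenode;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundEditLog;
import org.apache.hadoop.hdfs.server.namenode.FSImageTransactionalStorageInspector.FoundFSImage;
import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;

/** Dry-run purger: logs what would be purged instead of deleting it. */
class LoggingStoragePurger implements StoragePurger {
  private static final Log LOG = LogFactory.getLog(LoggingStoragePurger.class);

  @Override // assumed method name, not shown in this diff
  public void purgeImage(FoundFSImage image) {
    LOG.info("Would purge checkpoint image: " + image);
  }

  @Override // assumed method name, not shown in this diff
  public void purgeLog(FoundEditLog log) {
    LOG.info("Would purge edit log: " + log);
  }
}

Such a purger could then be handed to the four-argument constructor shown above in place of DeletionStoragePurger.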

+ 6 - 6
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalFunctional.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionFunctional.java

@@ -38,26 +38,26 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 
 
 /**
- * Functional tests for NNStorageArchivalManager. This differs from
- * {@link TestNNStorageArchivalManager} in that the other test suite
+ * Functional tests for NNStorageRetentionManager. This differs from
+ * {@link TestNNStorageRetentionManager} in that the other test suite
  * is only unit/mock-based tests whereas this suite starts miniclusters,
  * etc.
  */
-public class TestNNStorageArchivalFunctional {
+public class TestNNStorageRetentionFunctional {
 
   private static File TEST_ROOT_DIR =
     new File(MiniDFSCluster.getBaseDirectory());
   private static Log LOG = LogFactory.getLog(
-      TestNNStorageArchivalFunctional.class);
+      TestNNStorageRetentionFunctional.class);
 
  /**
   * Test case where two directories are configured as NAME_AND_EDITS
   * and one of them fails to save storage. Since the edits and image
   * failure states are decoupled, the failure of image saving should
-  * not prevent the archival of logs from that dir.
+  * not prevent the purging of logs from that dir.
   */
   @Test
-  public void testArchivingWithNameEditsDirAfterFailure()
+  public void testPurgingWithNameEditsDirAfterFailure()
       throws IOException {
     MiniDFSCluster cluster = null;    
     Configuration conf = new HdfsConfiguration();
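
The test javadoc above describes two directories configured as NAME_AND_EDITS. A minimal sketch of how such a Configuration might be assembled is shown below; the helper class, directory handling, and reliance on the DFSConfigKeys constants are illustrative assumptions, since the actual test setup is not part of this diff.

import java.io.File;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSConfigKeys;
import org.apache.hadoop.hdfs.HdfsConfiguration;

/** Builds a conf in which both directories act as NAME_AND_EDITS. */
class NameAndEditsDirConfExample {
  static Configuration twoNameAndEditsDirs(File d0, File d1) {
    Configuration conf = new HdfsConfiguration();
    String dirs = d0.toURI() + "," + d1.toURI();
    // Listing the same dirs under both keys makes each a NAME_AND_EDITS dir.
    conf.set(DFSConfigKeys.DFS_NAMENODE_NAME_DIR_KEY, dirs);   // dfs.namenode.name.dir
    conf.set(DFSConfigKeys.DFS_NAMENODE_EDITS_DIR_KEY, dirs);  // dfs.namenode.edits.dir
    return conf;
  }
}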

+ 4 - 4
hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageArchivalManager.java → hdfs/src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestNNStorageRetentionManager.java

@@ -31,7 +31,7 @@ import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getInProgressEdit
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getFinalizedEditsFileName;
 import static org.apache.hadoop.hdfs.server.namenode.NNStorage.getImageFileName;
 
-import org.apache.hadoop.hdfs.server.namenode.NNStorageArchivalManager.StoragePurger;
+import org.apache.hadoop.hdfs.server.namenode.NNStorageRetentionManager.StoragePurger;
 import org.junit.Assert;
 import org.junit.Test;
 import org.mockito.ArgumentCaptor;
@@ -45,7 +45,7 @@ import com.google.common.collect.Maps;
 import com.google.common.collect.Sets;
 
 
-public class TestNNStorageArchivalManager {
+public class TestNNStorageRetentionManager {
   /**
    * Test the "easy case" where we have more images in the
    * directory than we need to keep. Should purge the
@@ -167,14 +167,14 @@ public class TestNNStorageArchivalManager {
     Configuration conf = new Configuration();
 
     StoragePurger mockPurger =
-      Mockito.mock(NNStorageArchivalManager.StoragePurger.class);
+      Mockito.mock(NNStorageRetentionManager.StoragePurger.class);
     ArgumentCaptor<FoundFSImage> imagesPurgedCaptor =
       ArgumentCaptor.forClass(FoundFSImage.class);    
     ArgumentCaptor<FoundEditLog> logsPurgedCaptor =
       ArgumentCaptor.forClass(FoundEditLog.class);    
 
     // Ask the manager to purge files we don't need any more
-    new NNStorageArchivalManager(conf,
+    new NNStorageRetentionManager(conf,
         tc.mockStorage(), tc.mockEditLog(), mockPurger)
       .purgeOldStorage();
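
Continuing the mock-based test above, the captured purge calls might be verified along these lines; the purgeImage/purgeLog method names are assumptions inferred from the captors' element types and are not shown in this diff.

    // Assumed method names; one capture per purged file.
    Mockito.verify(mockPurger, Mockito.atLeastOnce())
        .purgeImage(imagesPurgedCaptor.capture());
    Mockito.verify(mockPurger, Mockito.atLeastOnce())
        .purgeLog(logsPurgedCaptor.capture());

    // The captors now hold every purged image and edit log for assertions.
    Assert.assertFalse(imagesPurgedCaptor.getAllValues().isEmpty());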