HDFS-4057. NameNode.namesystem should be private. Contributed by Brandon Li.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-1@1400551 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 12 years ago
Parent commit: 32c5e8f49f
21 changed files with 49 additions and 38 deletions
  1. + 2 - 0   CHANGES.txt
  2. + 1 - 1   src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java
  3. + 2 - 2   src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java
  4. + 6 - 1   src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java
  5. + 2 - 2   src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java
  6. + 1 - 1   src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java
  7. + 1 - 1   src/test/org/apache/hadoop/hdfs/TestDFSRemove.java
  8. + 1 - 1   src/test/org/apache/hadoop/hdfs/TestDFSRename.java
  9. + 4 - 4   src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java
  10. + 1 - 1  src/test/org/apache/hadoop/hdfs/TestDecommission.java
  11. + 13 - 10  src/test/org/apache/hadoop/hdfs/TestFileAppend4.java
  12. + 1 - 1  src/test/org/apache/hadoop/hdfs/TestFileCorruption.java
  13. + 2 - 1  src/test/org/apache/hadoop/hdfs/TestLease.java
  14. + 1 - 1  src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java
  15. + 2 - 2  src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java
  16. + 4 - 4  src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java
  17. + 1 - 1  src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java
  18. + 1 - 1  src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
  19. + 1 - 1  src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java
  20. + 1 - 1  src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java
  21. + 1 - 1  src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java

+ 2 - 0
CHANGES.txt

@@ -95,6 +95,8 @@ Release 1.2.0 - unreleased
     HDFS-4071. Add number of stale datanodes to metrics (port of HDFS-4059).
     (Jing Zhao via suresh)
 
+    HDFS-4057. NameNode.namesystem should be private. (Brandon Li via suresh)
+
   OPTIMIZATIONS
 
     HDFS-2533. Backport: Remove needless synchronization on some FSDataSet

+ 1 - 1
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FileChecksumServlets.java

@@ -57,7 +57,7 @@ public class FileChecksumServlets {
       final UserGroupInformation ugi = getUGI(request, conf);
       String tokenString = request.getParameter(JspHelper.DELEGATION_PARAMETER_NAME);
       final NameNode namenode = (NameNode)context.getAttribute("name.node");
-      final DatanodeID datanode = namenode.namesystem.getRandomDatanode();
+      final DatanodeID datanode = namenode.getNamesystem().getRandomDatanode();
       try {
         final URI uri = 
           createRedirectUri("/getFileChecksum", ugi, datanode, request, tokenString);

+ 2 - 2
src/hdfs/org/apache/hadoop/hdfs/server/namenode/FsckServlet.java

@@ -55,8 +55,8 @@ public class FsckServlet extends DfsServlet {
         @Override
         public Object run() throws Exception {
           final NameNode nn = (NameNode) context.getAttribute("name.node");
-          final int totalDatanodes = nn.namesystem.getNumberOfDatanodes(DatanodeReportType.LIVE); 
-          final short minReplication = nn.namesystem.getMinReplication();
+          final int totalDatanodes = nn.getNamesystem().getNumberOfDatanodes(DatanodeReportType.LIVE); 
+          final short minReplication = nn.getNamesystem().getMinReplication();
 
           new NamenodeFsck(conf, nn, nn.getNetworkTopology(), pmap, out,
               totalDatanodes, minReplication, remoteAddress).fsck();

+ 6 - 1
src/hdfs/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -156,7 +156,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
 
   public static final Log LOG = LogFactory.getLog(NameNode.class.getName());
   public static final Log stateChangeLog = LogFactory.getLog("org.apache.hadoop.hdfs.StateChange");
-  public FSNamesystem namesystem; // TODO: This should private. Use getNamesystem() instead. 
+  private FSNamesystem namesystem;
   /** RPC server */
   private Server server;
   /** RPC server for HDFS Services communication.
@@ -190,6 +190,11 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
 
   static NameNodeInstrumentation myMetrics;
 
+  /* Should only be used for test */
+  public void setNamesystem(FSNamesystem ns) {
+    namesystem = ns;
+  }
+  
   public FSNamesystem getNamesystem() {
     return namesystem;
   }
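
The NameNode.java hunk above is the substance of the patch: the field goes private, reads go through getNamesystem(), and a test-only setNamesystem() is added for tests that must swap in a mock. A minimal compilable sketch of the resulting access pattern, using stripped-down stand-ins for the real FSNamesystem and LeaseManager (the real classes are far larger):

// Sketch only: stand-in stubs showing the private-field-plus-accessors
// shape this patch introduces; not the real Hadoop classes.
class LeaseManager {
  int countLease() { return 0; }                  // stub
}

class FSNamesystem {
  LeaseManager leaseManager = new LeaseManager(); // stub
}

class NameNode {
  private FSNamesystem namesystem = new FSNamesystem(); // was public

  public FSNamesystem getNamesystem() {           // supported read path
    return namesystem;
  }

  /* Should only be used for test */
  public void setNamesystem(FSNamesystem ns) {    // test-only injection point
    namesystem = ns;
  }
}

class Caller {
  int countLeases(NameNode nn) {
    // before: return nn.namesystem.leaseManager.countLease();  // direct field access
    return nn.getNamesystem().leaseManager.countLease();
  }
}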

+ 2 - 2
src/test/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -991,8 +991,8 @@ public class MiniDFSCluster {
    * Set the softLimit and hardLimit of client lease periods
    */
   void setLeasePeriod(long soft, long hard) {
-    nameNode.namesystem.leaseManager.setLeasePeriod(soft, hard);
-    nameNode.namesystem.lmthread.interrupt();
+    nameNode.getNamesystem().leaseManager.setLeasePeriod(soft, hard);
+    nameNode.getNamesystem().lmthread.interrupt();
   }
 
   /**

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestBlocksScheduledCounter.java

@@ -52,7 +52,7 @@ public class TestBlocksScheduledCounter extends TestCase {
     ((DFSOutputStream)(out.getWrappedStream())).sync();
     
     ArrayList<DatanodeDescriptor> dnList = new ArrayList<DatanodeDescriptor>();
-    cluster.getNameNode().namesystem.DFSNodesStatus(dnList, dnList);
+    cluster.getNameNode().getNamesystem().DFSNodesStatus(dnList, dnList);
     DatanodeDescriptor dn = dnList.get(0);
     
     assertEquals(1, dn.getBlocksScheduled());

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestDFSRemove.java

@@ -28,7 +28,7 @@ import org.apache.hadoop.hdfs.server.datanode.DataNode;
 
 public class TestDFSRemove extends junit.framework.TestCase {
   static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNameNode().namesystem.leaseManager.countLease();
+    return cluster.getNameNode().getNamesystem().leaseManager.countLease();
   }
   
   final Path dir = new Path("/test/remove/");

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestDFSRename.java

@@ -30,7 +30,7 @@ public class TestDFSRename extends junit.framework.TestCase {
   static Configuration CONF = new Configuration();
   static MiniDFSCluster cluster = null;
   static int countLease(MiniDFSCluster cluster) {
-    return cluster.getNameNode().namesystem.leaseManager.countLease();
+    return cluster.getNameNode().getNamesystem().leaseManager.countLease();
   }
   
   final Path dir = new Path("/test/rename/");

+ 4 - 4
src/test/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -326,7 +326,7 @@ public class TestDatanodeBlockScanner extends TestCase {
      cluster.restartDataNode(corruptReplicasDNIDs[i]);
 
     // Loop until all corrupt replicas are reported
-    int corruptReplicaSize = cluster.getNameNode().namesystem.
+    int corruptReplicaSize = cluster.getNameNode().getNamesystem().
                               corruptReplicas.numCorruptReplicas(blk);
     while (corruptReplicaSize != numCorruptReplicas) {
       try {
@@ -340,7 +340,7 @@ public class TestDatanodeBlockScanner extends TestCase {
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      corruptReplicaSize = cluster.getNameNode().namesystem.
+      corruptReplicaSize = cluster.getNameNode().getNamesystem().
                               corruptReplicas.numCorruptReplicas(blk);
     }
     
@@ -361,7 +361,7 @@ public class TestDatanodeBlockScanner extends TestCase {
 
     // Make sure the corrupt replica is invalidated and removed from
     // corruptReplicasMap
-    corruptReplicaSize = cluster.getNameNode().namesystem.
+    corruptReplicaSize = cluster.getNameNode().getNamesystem().
                           corruptReplicas.numCorruptReplicas(blk);
     while (corruptReplicaSize != 0 || replicaCount != numReplicas) {
       try {
@@ -369,7 +369,7 @@ public class TestDatanodeBlockScanner extends TestCase {
         Thread.sleep(1000);
       } catch (InterruptedException ignore) {
       }
-      corruptReplicaSize = cluster.getNameNode().namesystem.
+      corruptReplicaSize = cluster.getNameNode().getNamesystem().
                             corruptReplicas.numCorruptReplicas(blk);
       blocks = dfsClient.namenode.
                  getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestDecommission.java

@@ -183,7 +183,7 @@ public class TestDecommission extends TestCase {
     ArrayList<String> nodes = new ArrayList<String>(decommissionedNodes);
     nodes.add(nodename);
     writeConfigFile(localFileSys, excludeFile, nodes);
-    namenode.namesystem.refreshNodes(conf);
+    namenode.getNamesystem().refreshNodes(conf);
     return nodename;
   }
 

+ 13 - 10
src/test/org/apache/hadoop/hdfs/TestFileAppend4.java

@@ -780,9 +780,9 @@ public class TestFileAppend4 extends TestCase {
 
       // Make the NN fail to commitBlockSynchronization one time
       NameNode nn = cluster.getNameNode();
-      nn.namesystem = spy(nn.namesystem);
+      nn.setNamesystem(spy(nn.getNamesystem()));
       doAnswer(new ThrowNTimesAnswer(IOException.class, 1)).
-        when(nn.namesystem).
+        when(nn.getNamesystem()).
         commitBlockSynchronization((Block)anyObject(), anyInt(), anyInt(),
                                    anyBoolean(), anyBoolean(),
                                    (DatanodeID[])anyObject());
@@ -890,9 +890,9 @@ public class TestFileAppend4 extends TestCase {
     // Allow us to delay commitBlockSynchronization
     DelayAnswer delayer = new DelayAnswer();
     NameNode nn = cluster.getNameNode();
-    nn.namesystem = spy(nn.namesystem);
+    nn.setNamesystem(spy(nn.getNamesystem()));
     doAnswer(delayer).
-      when(nn.namesystem).
+      when(nn.getNamesystem()).
       commitBlockSynchronization((Block) anyObject(), anyInt(), anyInt(),
         anyBoolean(), anyBoolean(),
         (DatanodeID[]) anyObject());
@@ -1235,7 +1235,7 @@ public class TestFileAppend4 extends TestCase {
       LOG.info("======== Writing");
       AppendTestUtil.write(stm, 0, halfBlock/2);
       LOG.info("======== Checking progress");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(), "/delayedReceiveBlock", true));
       LOG.info("======== Closing");
       stm.close();
 
@@ -1286,7 +1286,8 @@ public class TestFileAppend4 extends TestCase {
       AppendTestUtil.write(stm, 0, halfBlock/4);
 
       LOG.info("======== Checking progress");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(),
+          "/delayedReceiveBlock", true));
       LOG.info("======== Closing");
       stm.close();
 
@@ -1320,7 +1321,8 @@ public class TestFileAppend4 extends TestCase {
       waitForBlockReplication(fs1, "/delayedReceiveBlock", 0, 3000);
 
       LOG.info("======== Checking not complete");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(),
+          "/delayedReceiveBlock", true));
 
       // Stop one of the DNs, don't restart
       MiniDFSCluster.DataNodeProperties dnprops = cluster.stopDataNode(0);
@@ -1330,7 +1332,8 @@ public class TestFileAppend4 extends TestCase {
 
       // Make sure we don't see the file as complete
       LOG.info("======== Checking progress");
-      assertFalse(NameNodeAdapter.checkFileProgress(nn.namesystem, "/delayedReceiveBlock", true));
+      assertFalse(NameNodeAdapter.checkFileProgress(nn.getNamesystem(),
+          "/delayedReceiveBlock", true));
       LOG.info("======== Closing");
       stm.close();
 
@@ -1361,9 +1364,9 @@ public class TestFileAppend4 extends TestCase {
       DelayAnswer delayer = new DelayAnswer(false);
 
       NameNode nn = cluster.getNameNode();
-      nn.namesystem = spy(nn.namesystem);
+      nn.setNamesystem(spy(nn.getNamesystem()));
       NameNodeAdapter.callNextGenerationStampForBlock(
-        doAnswer(delayer).when(nn.namesystem),
+        doAnswer(delayer).when(nn.getNamesystem()),
         (Block)anyObject(), anyBoolean());
 
       final AtomicReference<Throwable> err = new AtomicReference<Throwable>();
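
TestFileAppend4 is the one caller that wrote to the field rather than reading it: it swaps the live FSNamesystem for a Mockito spy to inject faults into commitBlockSynchronization, which is exactly what the new setter exists for. A hedged sketch of that injection, assuming Mockito 1.x on the classpath; the matchers mirror the diff, while the doThrow stubbing and the "injected" exception stand in for the test's own ThrowNTimesAnswer/DelayAnswer helpers:

import static org.mockito.Matchers.anyBoolean;
import static org.mockito.Matchers.anyInt;
import static org.mockito.Matchers.anyObject;
import static org.mockito.Mockito.doThrow;
import static org.mockito.Mockito.spy;

import java.io.IOException;

import org.apache.hadoop.hdfs.protocol.Block;
import org.apache.hadoop.hdfs.protocol.DatanodeID;
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

class SpyInjectionSketch {
  void injectCommitFailure(NameNode nn) throws IOException {
    // Wrap the live namesystem in a Mockito spy and swap it back in
    // through the setter this patch adds (the field is private now).
    FSNamesystem spyNs = spy(nn.getNamesystem());
    nn.setNamesystem(spyNs);

    // Illustrative stubbing: fail the next commitBlockSynchronization.
    doThrow(new IOException("injected")).when(spyNs)
        .commitBlockSynchronization((Block) anyObject(), anyInt(), anyInt(),
            anyBoolean(), anyBoolean(), (DatanodeID[]) anyObject());
  }
}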

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestFileCorruption.java

@@ -134,7 +134,7 @@ public class TestFileCorruption extends TestCase {
       DataNode dataNode = datanodes.get(2);
       
       // report corrupted block by the third datanode
-      cluster.getNameNode().namesystem.markBlockAsCorrupt(blk, 
+      cluster.getNameNode().getNamesystem().markBlockAsCorrupt(blk, 
           new DatanodeInfo(dataNode.dnRegistration ));
       
       // open the file

+ 2 - 1
src/test/org/apache/hadoop/hdfs/TestLease.java

@@ -25,7 +25,8 @@ import org.apache.hadoop.fs.Path;
 
 public class TestLease extends junit.framework.TestCase {
   static boolean hasLease(MiniDFSCluster cluster, Path src) {
-    return cluster.getNameNode().namesystem.leaseManager.getLeaseByPath(src.toString()) != null;
+    return cluster.getNameNode().getNamesystem().leaseManager
+        .getLeaseByPath(src.toString()) != null;
   }
   
   final Path dir = new Path("/test/lease/");

+ 1 - 1
src/test/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -138,7 +138,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
 
       BlockMetaDataInfo[] updatedmetainfo = new BlockMetaDataInfo[REPLICATION_NUM];
       int minsize = min(newblocksizes);
-      long currentGS = cluster.getNameNode().namesystem.getGenerationStamp();
+      long currentGS = cluster.getNameNode().getNamesystem().getGenerationStamp();
       lastblock.setGenerationStamp(currentGS);
       for(int i = 0; i < REPLICATION_NUM; i++) {
         updatedmetainfo[i] = datanodes[i].getBlockMetaDataInfo(lastblock);

+ 2 - 2
src/test/org/apache/hadoop/hdfs/server/datanode/TestDiskError.java

@@ -95,8 +95,8 @@ public class TestDiskError extends TestCase {
       DFSTestUtil.waitReplication(fs, fileName, (short)1);
 
       // get the block belonged to the created file
-      LocatedBlocks blocks = cluster.getNameNode().namesystem.getBlockLocations(
-          fileName.toString(), 0, (long)fileLen);
+      LocatedBlocks blocks = cluster.getNameNode().getNamesystem()
+          .getBlockLocations(fileName.toString(), 0, (long) fileLen);
       assertEquals(blocks.locatedBlockCount(), 1);
       LocatedBlock block = blocks.get(0);
       

+ 4 - 4
src/test/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -1049,9 +1049,9 @@ public class NNThroughputBenchmark {
       // start data-nodes; create a bunch of files; generate block reports.
       blockReportObject.generateInputs(ignore);
       // stop replication monitor
-      nameNode.namesystem.replthread.interrupt();
+      nameNode.getNamesystem().replthread.interrupt();
       try {
-        nameNode.namesystem.replthread.join();
+        nameNode.getNamesystem().replthread.join();
       } catch(InterruptedException ei) {
         return;
       }
@@ -1063,7 +1063,7 @@ public class NNThroughputBenchmark {
       // decommission data-nodes
       decommissionNodes();
       // set node replication limit
-      nameNode.namesystem.setNodeReplicationLimit(nodeReplicationLimit);
+      nameNode.getNamesystem().setNodeReplicationLimit(nodeReplicationLimit);
     }
 
     private void decommissionNodes() throws IOException {
@@ -1094,7 +1094,7 @@ public class NNThroughputBenchmark {
       assert daemonId < numThreads : "Wrong daemonId.";
       long start = System.currentTimeMillis();
       // compute data-node work
-      int work = nameNode.namesystem.computeDatanodeWork();
+      int work = nameNode.getNamesystem().computeDatanodeWork();
       long end = System.currentTimeMillis();
       numPendingBlocks += work;
       if(work == 0)

+ 1 - 1
src/test/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -452,7 +452,7 @@ public class TestFsck extends TestCase {
       DFSTestUtil.waitReplication(fs, filePath, (short)1);
       
       // intentionally corrupt NN data structure
-      INodeFile node = (INodeFile)cluster.getNameNode().namesystem.dir.rootDir.getNode(fileName);
+      INodeFile node = (INodeFile)cluster.getNameNode().getNamesystem().dir.rootDir.getNode(fileName);
       assertEquals(node.blocks.length, 1);
       node.blocks[0].setNumBytes(-1L);  // set the block length to be negative
       

+ 1 - 1
src/test/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java

@@ -52,7 +52,7 @@ public class TestNameNodeMXBean {
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitActive();
 
-      FSNamesystem fsn = cluster.getNameNode().namesystem;
+      FSNamesystem fsn = cluster.getNameNode().getNamesystem();
 
       MBeanServer mbs = ManagementFactory.getPlatformMBeanServer();
       ObjectName mxbeanName = new ObjectName(

+ 1 - 1
src/test/org/apache/hadoop/hdfs/server/namenode/TestNamenodeCapacityReport.java

@@ -55,7 +55,7 @@ public class TestNamenodeCapacityReport extends TestCase {
       cluster = new MiniDFSCluster(conf, 1, true, null);
       cluster.waitActive();
       
-      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
       
       // Ensure the data reported for each data node is right
       ArrayList<DatanodeDescriptor> live = new ArrayList<DatanodeDescriptor>();

+ 1 - 1
src/test/org/apache/hadoop/hdfs/server/namenode/TestNodeCount.java

@@ -26,7 +26,7 @@ public class TestNodeCount extends TestCase {
     final MiniDFSCluster cluster = 
       new MiniDFSCluster(conf, REPLICATION_FACTOR, true, null);
     try {
-      final FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      final FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
       final FileSystem fs = cluster.getFileSystem();
       
       // populate the cluster with a one block file

+ 1 - 1
src/test/org/apache/hadoop/hdfs/server/namenode/TestUnderReplicatedBlocks.java

@@ -25,7 +25,7 @@ public class TestUnderReplicatedBlocks extends TestCase {
       
       // remove one replica from the blocksMap so block becomes under-replicated
       // but the block does not get put into the under-replicated blocks queue
-      FSNamesystem namesystem = cluster.getNameNode().namesystem;
+      FSNamesystem namesystem = cluster.getNameNode().getNamesystem();
       Block b = DFSTestUtil.getFirstBlock(fs, FILE_PATH);
       DatanodeDescriptor dn = namesystem.blocksMap.nodeIterator(b).next();
       namesystem.addToInvalidates(b, dn);