
HDFS-514. Change DFSClient.namenode from public to private. Contributed by Bill Zeller

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@799480 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze, 16 years ago
commit 2dc27d29bc

+ 3 - 0
CHANGES.txt

@@ -60,6 +60,9 @@ Trunk (unreleased changes)
     HDFS-500. Deprecate NameNode methods deprecated in NameNodeProtocol.
     (Jakob Homan via shv)
 
+    HDFS-514. Change DFSClient.namenode from public to private.  (Bill Zeller
+    via szetszwo)
+
   BUG FIXES
     HDFS-76. Better error message to users when commands fail because of 
     lack of quota. Allow quota to be set even if the limit is lower than

+ 9 - 1
src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -127,7 +127,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
   public static final Log LOG = LogFactory.getLog(DFSClient.class);
   public static final int MAX_BLOCK_ACQUIRE_FAILURES = 3;
   private static final int TCP_WINDOW_SIZE = 128 * 1024; // 128 KB
-  public final ClientProtocol namenode;
+  final private ClientProtocol namenode;
   final private ClientProtocol rpcNamenode;
   final UnixUserGroupInformation ugi;
   volatile boolean clientRunning = true;
@@ -430,6 +430,14 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     return create(src, overwrite, replication, blockSize, null);
   }
 
+  /**
+   * Get the namenode associated with this DFSClient object
+   * @return the namenode associated with this DFSClient object
+   */
+  public ClientProtocol getNamenode() {
+    return namenode;
+  }
+  
   
   /**
    * Create a new dfs file with the specified block replication 

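For reference, the migration this patch asks of callers outside DFSClient: read the namenode proxy through the new accessor instead of the old public field. A minimal sketch, assuming a Configuration already pointing at a running cluster; the path and length arguments here are illustrative, not from this patch:

    // Before HDFS-514, a caller could reach into the field directly:
    //   LocatedBlocks lbs = client.namenode.getBlockLocations("/f", 0, 1);
    // After HDFS-514, the same lookup goes through the accessor:
    DFSClient client = new DFSClient(conf);      // conf: an org.apache.hadoop.conf.Configuration (assumed set up)
    ClientProtocol nn = client.getNamenode();    // RPC proxy to the namenode; the field itself is now private
    LocatedBlocks lbs = nn.getBlockLocations("/f", 0, 1);
    System.out.println("located blocks: " + lbs.locatedBlockCount());

The rest of the patch is this same mechanical substitution at every call site, as the hunks below show.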
+ 6 - 6
src/java/org/apache/hadoop/hdfs/server/datanode/DatanodeJspHelper.java

@@ -65,7 +65,7 @@ class DatanodeJspHelper {
      JspHelper.printGotoForm(out, namenodeInfoPort, target);
    } else {
      if (!targetStatus.isDir()) { // a file
-        List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(dir, 0, 1)
+        List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(dir, 0, 1)
            .getLocatedBlocks();
 
        LocatedBlock firstBlock = null;
@@ -205,7 +205,7 @@ class DatanodeJspHelper {
 
    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(),
        JspHelper.conf);
-    List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+    List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
        Long.MAX_VALUE).getLocatedBlocks();
    // Add the various links for looking at the file contents
    // URL for downloading the full file
@@ -320,7 +320,7 @@ class DatanodeJspHelper {
    AccessToken accessToken = AccessToken.DUMMY_TOKEN;
    if (JspHelper.conf.getBoolean(
        AccessTokenHandler.STRING_ENABLE_ACCESS_TOKEN, false)) {
-      List<LocatedBlock> blks = dfs.namenode.getBlockLocations(filename, 0,
+      List<LocatedBlock> blks = dfs.getNamenode().getBlockLocations(filename, 0,
          Long.MAX_VALUE).getLocatedBlocks();
      if (blks == null || blks.size() == 0) {
        out.print("Can't locate file blocks");
@@ -390,7 +390,7 @@ class DatanodeJspHelper {
    // determine data for the next link
    if (startOffset + chunkSizeToView >= blockSize) {
      // we have to go to the next block from this point onwards
-      List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+      List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
          Long.MAX_VALUE).getLocatedBlocks();
      for (int i = 0; i < blocks.size(); i++) {
        if (blocks.get(i).getBlock().getBlockId() == blockId) {
@@ -440,7 +440,7 @@ class DatanodeJspHelper {
    int prevPort = req.getServerPort();
    int prevDatanodePort = datanodePort;
    if (startOffset == 0) {
-      List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+      List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
          Long.MAX_VALUE).getLocatedBlocks();
      for (int i = 0; i < blocks.size(); i++) {
        if (blocks.get(i).getBlock().getBlockId() == blockId) {
@@ -546,7 +546,7 @@ class DatanodeJspHelper {
    // fetch the block from the datanode that has the last block for this file
    final DFSClient dfs = new DFSClient(datanode.getNameNodeAddr(),
        JspHelper.conf);
-    List<LocatedBlock> blocks = dfs.namenode.getBlockLocations(filename, 0,
+    List<LocatedBlock> blocks = dfs.getNamenode().getBlockLocations(filename, 0,
        Long.MAX_VALUE).getLocatedBlocks();
    if (blocks == null || blocks.size() == 0) {
      out.print("No datanodes contain blocks of file " + filename);

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestAbandonBlock.java

@@ -50,10 +50,10 @@ public class TestAbandonBlock extends junit.framework.TestCase {
   
      //try reading the block by someone
      DFSClient dfsclient = new DFSClient(CONF);
-      LocatedBlocks blocks = dfsclient.namenode.getBlockLocations(src, 0, 1);
+      LocatedBlocks blocks = dfsclient.getNamenode().getBlockLocations(src, 0, 1);
      LocatedBlock b = blocks.get(0); 
      try {
-        dfsclient.namenode.abandonBlock(b.getBlock(), src, "someone");
+        dfsclient.getNamenode().abandonBlock(b.getBlock(), src, "someone");
        //previous line should throw an exception.
        assertTrue(false);
      }

+ 8 - 8
src/test/hdfs/org/apache/hadoop/hdfs/TestDatanodeBlockScanner.java

@@ -171,7 +171,7 @@ public class TestDatanodeBlockScanner extends TestCase {
    dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                        cluster.getNameNodePort()), conf);
    do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      blockCount = blocks.get(0).getLocations().length;
      try {
@@ -190,7 +190,7 @@ public class TestDatanodeBlockScanner extends TestCase {
 
    // We have 2 good replicas and block is not corrupt
    do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      blockCount = blocks.get(0).getLocations().length;
      try {
@@ -218,7 +218,7 @@ public class TestDatanodeBlockScanner extends TestCase {
    // We now have the blocks to be marked as corrupt and we get back all
    // its replicas
    do {
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      blockCount = blocks.get(0).getLocations().length;
      try {
@@ -282,7 +282,7 @@ public class TestDatanodeBlockScanner extends TestCase {
    
    dfsClient = new DFSClient(new InetSocketAddress("localhost", 
                                        cluster.getNameNodePort()), conf);
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
 
@@ -294,7 +294,7 @@ public class TestDatanodeBlockScanner extends TestCase {
        Thread.sleep(1000);
      } catch (InterruptedException ignore) {
      }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                   getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }
@@ -332,7 +332,7 @@ public class TestDatanodeBlockScanner extends TestCase {
    }
    
    // Loop until the block recovers after replication
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != numReplicas) {
@@ -341,7 +341,7 @@ public class TestDatanodeBlockScanner extends TestCase {
        Thread.sleep(1000);
      } catch (InterruptedException ignore) {
      }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }
@@ -358,7 +358,7 @@ public class TestDatanodeBlockScanner extends TestCase {
      }
      corruptReplicaSize = cluster.getNamesystem().
                            numCorruptReplicas(blk);
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                 getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend.java

@@ -168,7 +168,7 @@ public class TestFileAppend extends TestCase {
      assertTrue("There should be only one datanode but found " + dn.length,
                  dn.length == 1);
 
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      List<LocatedBlock> blocks = locations.getLocatedBlocks();
      FSDataset dataset = (FSDataset) dn[0].data;

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestFileAppend3.java

@@ -160,7 +160,7 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
    //b. Log into one datanode that has one replica of this block.
    //   Find the block file on this datanode and truncate it to zero size.
-    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(p.toString(), 0L, len1);
+    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(p.toString(), 0L, len1);
    assertEquals(1, locatedblocks.locatedBlockCount());
    final LocatedBlock lb = locatedblocks.get(0);
    final Block blk = lb.getBlock();
@@ -224,7 +224,7 @@ public class TestFileAppend3 extends junit.framework.TestCase {
 
    //check block sizes 
    final long len = fs.getFileStatus(pnew).getLen();
-    final LocatedBlocks locatedblocks = fs.dfs.namenode.getBlockLocations(pnew.toString(), 0L, len);
+    final LocatedBlocks locatedblocks = fs.dfs.getNamenode().getBlockLocations(pnew.toString(), 0L, len);
    final int numblock = locatedblocks.locatedBlockCount();
    for(int i = 0; i < numblock; i++) {
      final LocatedBlock lb = locatedblocks.get(i);

+ 8 - 8
src/test/hdfs/org/apache/hadoop/hdfs/TestFileCreation.java

@@ -372,7 +372,7 @@ public class TestFileCreation extends junit.framework.TestCase {
 
      // verify that no blocks are associated with this file
      // bad block allocations were cleaned up earlier.
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up",
@@ -411,18 +411,18 @@ public class TestFileCreation extends junit.framework.TestCase {
      System.out.println("testFileCreationError2: "
                         + "Created file filestatus.dat with one replicas.");
 
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "The file has " + locations.locatedBlockCount() + " blocks.");
 
      // add another block to the file
-      LocatedBlock location = client.namenode.addBlock(file1.toString(), 
+      LocatedBlock location = client.getNamenode().addBlock(file1.toString(), 
          client.clientName);
      System.out.println("testFileCreationError2: "
          + "Added block " + location.getBlock());
 
-      locations = client.namenode.getBlockLocations(file1.toString(), 
+      locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                    0, Long.MAX_VALUE);
      int count = locations.locatedBlockCount();
      System.out.println("testFileCreationError2: "
@@ -439,7 +439,7 @@ public class TestFileCreation extends junit.framework.TestCase {
      }
 
      // verify that the last block was synchronized.
-      locations = client.namenode.getBlockLocations(file1.toString(), 
+      locations = client.getNamenode().getBlockLocations(file1.toString(), 
                                                    0, Long.MAX_VALUE);
      System.out.println("testFileCreationError2: "
          + "locations = " + locations.locatedBlockCount());
@@ -567,14 +567,14 @@ public class TestFileCreation extends junit.framework.TestCase {
 
      // verify that new block is associated with this file
      DFSClient client = ((DistributedFileSystem)fs).dfs;
-      LocatedBlocks locations = client.namenode.getBlockLocations(
+      LocatedBlocks locations = client.getNamenode().getBlockLocations(
                                  file1.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file1,
                 locations.locatedBlockCount() == 3);
 
      // verify filestatus2.dat
-      locations = client.namenode.getBlockLocations(
+      locations = client.getNamenode().getBlockLocations(
                                  file2.toString(), 0, Long.MAX_VALUE);
      System.out.println("locations = " + locations.locatedBlockCount());
      assertTrue("Error blocks were not cleaned up for file " + file2,
@@ -790,7 +790,7 @@ public class TestFileCreation extends junit.framework.TestCase {
      // wait for the lease to expire
      try {Thread.sleep(5 * leasePeriod);} catch (InterruptedException e) {}
 
-      LocatedBlocks locations = dfs.dfs.namenode.getBlockLocations(
+      LocatedBlocks locations = dfs.dfs.getNamenode().getBlockLocations(
          f, 0, Long.MAX_VALUE);
      assertEquals(1, locations.locatedBlockCount());
      LocatedBlock locatedblock = locations.getLocatedBlocks().get(0);

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestGetBlocks.java

@@ -76,7 +76,7 @@ public class TestGetBlocks extends TestCase {
      boolean notWritten;
      do {
        DFSClient dfsclient = new DFSClient(CONF);
-        locatedBlocks = dfsclient.namenode.
+        locatedBlocks = dfsclient.getNamenode().
          getBlockLocations("/tmp.txt", 0, fileLen).getLocatedBlocks();
        assertEquals(2, locatedBlocks.size());
        notWritten = false;

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestInjectionForSimulatedStorage.java

@@ -148,7 +148,7 @@ public class TestInjectionForSimulatedStorage extends TestCase {
      writeFile(cluster.getFileSystem(), testPath, numDataNodes);
 
      
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, 20);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, 20);
 
      
      Block[][] blocksList = cluster.getAllBlockReports();
@@ -188,7 +188,7 @@ public class TestInjectionForSimulatedStorage extends TestCase {
                                  cluster.getNameNodePort()),
                                  conf);
      
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
      
    } finally {
      if (cluster != null) {

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestLeaseRecovery.java

@@ -75,7 +75,7 @@ public class TestLeaseRecovery extends junit.framework.TestCase {
 
      //get block info for the last block
      LocatedBlock locatedblock = TestInterDatanodeProtocol.getLastLocatedBlock(
-          dfs.dfs.namenode, filestr);
+          dfs.dfs.getNamenode(), filestr);
      DatanodeInfo[] datanodeinfos = locatedblock.getLocations();
      assertEquals(REPLICATION_NUM, datanodeinfos.length);
 

+ 8 - 8
src/test/hdfs/org/apache/hadoop/hdfs/TestReplication.java

@@ -171,7 +171,7 @@ public class TestReplication extends TestCase {
    fs.setReplication(file1, (short)2);
  
    // Now get block details and check if the block is corrupt
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
              getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    while (blocks.get(0).isCorrupt() != true) {
      try {
@@ -179,7 +179,7 @@ public class TestReplication extends TestCase {
        Thread.sleep(1000);
      } catch (InterruptedException ie) {
      }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    }
    replicaCount = blocks.get(0).getLocations().length;
@@ -321,10 +321,10 @@ public class TestReplication extends TestCase {
      out.write(buffer);
      out.close();
      
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
 
      // get first block of the file.
-      String block = dfsClient.namenode.
+      String block = dfsClient.getNamenode().
                     getBlockLocations(testFile, 0, Long.MAX_VALUE).
                     get(0).getBlock().getBlockName();
      
@@ -386,7 +386,7 @@ public class TestReplication extends TestCase {
                                  cluster.getNameNodePort()),
                                  conf);
      
-      waitForBlockReplication(testFile, dfsClient.namenode, numDataNodes, -1);
+      waitForBlockReplication(testFile, dfsClient.getNamenode(), numDataNodes, -1);
      
    } finally {
      if (cluster != null) {
@@ -436,19 +436,19 @@ public class TestReplication extends TestCase {
    // block replication triggers corrupt block detection
    DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost", 
        cluster.getNameNodePort()), fs.getConf());
-    LocatedBlocks blocks = dfsClient.namenode.getBlockLocations(
+    LocatedBlocks blocks = dfsClient.getNamenode().getBlockLocations(
        fileName.toString(), 0, fileLen);
    if (lenDelta < 0) { // replica truncated
    	while (!blocks.get(0).isCorrupt() || 
    			REPLICATION_FACTOR != blocks.get(0).getLocations().length) {
    		Thread.sleep(100);
-    		blocks = dfsClient.namenode.getBlockLocations(
+    		blocks = dfsClient.getNamenode().getBlockLocations(
    				fileName.toString(), 0, fileLen);
    	}
    } else { // no corruption detected; block replicated
    	while (REPLICATION_FACTOR+1 != blocks.get(0).getLocations().length) {
    		Thread.sleep(100);
-    		blocks = dfsClient.namenode.getBlockLocations(
+    		blocks = dfsClient.getNamenode().getBlockLocations(
    				fileName.toString(), 0, fileLen);
    	}
    }

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestBlockReplacement.java

@@ -110,7 +110,7 @@ public class TestBlockReplacement extends TestCase {
      InetSocketAddress addr = new InetSocketAddress("localhost",
          cluster.getNameNodePort());
      DFSClient client = new DFSClient(addr, CONF);
-      List<LocatedBlock> locatedBlocks = client.namenode.
+      List<LocatedBlock> locatedBlocks = client.getNamenode().
        getBlockLocations("/tmp.txt", 0, DEFAULT_BLOCK_SIZE).getLocatedBlocks();
      assertEquals(1, locatedBlocks.size());
      LocatedBlock block = locatedBlocks.get(0);
@@ -194,7 +194,7 @@ public class TestBlockReplacement extends TestCase {
        Thread.sleep(100);
      } catch(InterruptedException e) {
      }
-      List<LocatedBlock> blocks = client.namenode.
+      List<LocatedBlock> blocks = client.getNamenode().
      getBlockLocations(fileName, 0, fileLen).getLocatedBlocks();
      assertEquals(1, blocks.size());
      DatanodeInfo[] nodes = blocks.get(0).getLocations();

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/datanode/TestInterDatanodeProtocol.java

@@ -83,7 +83,7 @@ public class TestInterDatanodeProtocol extends junit.framework.TestCase {
      assertTrue(dfs.getClient().exists(filestr));
 
      //get block info
-      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().namenode, filestr);
+      LocatedBlock locatedblock = getLastLocatedBlock(dfs.getClient().getNamenode(), filestr);
      DatanodeInfo[] datanodeinfo = locatedblock.getLocations();
      assertTrue(datanodeinfo.length > 0);
 

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -175,7 +175,7 @@ public class TestFsck extends TestCase {
      String[] fileNames = util.getFileNames(topDir);
      DFSClient dfsClient = new DFSClient(new InetSocketAddress("localhost",
                                          cluster.getNameNodePort()), conf);
-      String block = dfsClient.namenode.
+      String block = dfsClient.getNamenode().
                      getBlockLocations(fileNames[0], 0, Long.MAX_VALUE).
                      get(0).getBlock().getBlockName();
      File baseDir = new File(System.getProperty("test.build.data",
@@ -315,7 +315,7 @@ public class TestFsck extends TestCase {
 
    dfsClient = new DFSClient(new InetSocketAddress("localhost",
                               cluster.getNameNodePort()), conf);
-    blocks = dfsClient.namenode.
+    blocks = dfsClient.getNamenode().
               getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
    replicaCount = blocks.get(0).getLocations().length;
    while (replicaCount != 3) {
@@ -323,7 +323,7 @@ public class TestFsck extends TestCase {
        Thread.sleep(100);
      } catch (InterruptedException ignore) {
      }
-      blocks = dfsClient.namenode.
+      blocks = dfsClient.getNamenode().
                getBlockLocations(file1.toString(), 0, Long.MAX_VALUE);
      replicaCount = blocks.get(0).getLocations().length;
    }