HDFS-892. Optionally use Avro reflection for Namenode RPC.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/trunk@927198 13f79535-47bb-0310-9956-ffa450edef68
Doug Cutting 15 years ago
parent
commit
3cf0f4965c
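
This change lets HDFS optionally route its Namenode RPC protocols through an
alternate RPC engine backed by Avro reflection. As a rough sketch (not part of
the patch), a client or test could opt a single protocol into the Avro engine
through the same rpc.engine.<protocol> configuration convention that
MiniDFSCluster uses below; org.apache.hadoop.ipc.AvroRpcEngine is the engine
class the new test refers to.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.protocol.ClientProtocol;

    // Hypothetical opt-in, mirroring MiniDFSCluster.setRpcEngine() below:
    // bind ClientProtocol to the Avro engine before any proxies are created.
    Configuration conf = new Configuration();
    conf.setClass("rpc.engine." + ClientProtocol.class.getName(),
                  org.apache.hadoop.ipc.AvroRpcEngine.class, Object.class);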

+ 4 - 0
CHANGES.txt

@@ -106,6 +106,10 @@ Trunk (unreleased changes)
     HDFS-1043. NNThroughputBenchmark modifications to support benchmarking of
     server-side user group resolution. (shv)
 
+    HDFS-892. Optionally use Avro reflection for Namenode RPC.  This
+    is not a complete implementation yet, but rather a starting point.
+    (cutting)
+
   OPTIMIZATIONS
 
     HDFS-946. NameNode should not return full path name when listing a

+ 14 - 0
build.xml

@@ -97,6 +97,7 @@
   <property name="test.hdfs.commit.tests.file" value="${test.src.dir}/commit-tests" />
   <property name="test.hdfs.all.tests.file" value="${test.src.dir}/all-tests" />
 
+  <property name="test.hdfs.rpc.engine" value=""/>
   <property name="test.libhdfs.dir" value="${test.build.dir}/libhdfs"/>
 
   <property name="web.src.dir" value="${basedir}/src/web"/>
@@ -308,6 +309,18 @@
       <classpath refid="classpath"/>
     </javac>   
 
+    <taskdef
+       name="paranamer" 
+       classname="com.thoughtworks.paranamer.ant.ParanamerGeneratorTask">
+      <classpath refid="classpath" />
+    </taskdef>
+    <paranamer
+       sourceDirectory="${java.src.dir}/org/apache/hadoop/hdfs/protocol"
+       outputDirectory="${build.classes}"/>
+    <paranamer
+       sourceDirectory="${java.src.dir}/org/apache/hadoop/hdfs/server/protocol"
+       outputDirectory="${build.classes}"/>
+
     <copy todir="${build.classes}">
      <fileset dir="${java.src.dir}" includes="**/*.properties"/>
      <fileset dir="${java.src.dir}" includes="hdfs-default.xml"/>
@@ -513,6 +526,7 @@
         <sysproperty key="test.src.dir" value="${test.src.dir}"/>
         <sysproperty key="test.build.extraconf" value="${test.build.extraconf}" />
         <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml"/>
+        <sysproperty key="hdfs.rpc.engine" value="${test.hdfs.rpc.engine}"/>
         <classpath refid="test.classpath"/>
         <!-- Pass probability specifications to the spawn JVM -->
         <syspropertyset id="FaultProbabilityProperties">
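
Avro reflection derives each RPC request record from a Java method signature,
but javac does not keep parameter names in class files, so the
ParanamerGeneratorTask above post-processes the compiled protocol packages to
make those names available at runtime. The new test.hdfs.rpc.engine property
is forwarded to the test JVM as the hdfs.rpc.engine system property, so an
alternate engine can be selected for a test run by overriding it on the Ant
command line (for example
-Dtest.hdfs.rpc.engine=org.apache.hadoop.ipc.AvroRpcEngine).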

+ 1 - 0
ivy.xml

@@ -58,6 +58,7 @@
     <dependency org="org.apache.hadoop" name="hadoop-core" rev="${hadoop-core.version}" conf="common->default"/>
     <dependency org="commons-logging" name="commons-logging" rev="${commons-logging.version}" conf="common->master"/>
     <dependency org="log4j" name="log4j" rev="${log4j.version}" conf="common->master"/>
+    <dependency org="org.apache.hadoop" name="avro" rev="${avro.version}" conf="common->default"/>
     <dependency org="org.aspectj" name="aspectjrt" rev="${aspectj.version}" conf="common->default"/>
     <dependency org="org.aspectj" name="aspectjtools" rev="${aspectj.version}" conf="common->default"/>
 

+ 2 - 2
ivy/ivysettings.xml

@@ -43,8 +43,8 @@
         checkmodified="true" changingPattern=".*SNAPSHOT"/>
 
     <filesystem name="fs" m2compatible="true" force="true">
-       <artifact pattern="${repo.dir}/org/apache/hadoop/[module]/[revision]/[module]-[revision].[ext]"/>
-       <ivy pattern="${repo.dir}/org/apache/hadoop/[module]/[revision]/[module]-[revision].pom"/>
+       <artifact pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].[ext]"/>
+       <ivy pattern="${repo.dir}/[organisation]/[module]/[revision]/[module]-[revision].pom"/>
     </filesystem>
 
     <chain name="default" dual="true" checkmodified="true" changingPattern=".*SNAPSHOT">

+ 1 - 0
ivy/libraries.properties

@@ -16,6 +16,7 @@
 #These are the versions of our dependencies (in alphabetical order)
 apacheant.version=1.7.1
 ant-task.version=2.0.10
+avro.version=1.3.1
 
 checkstyle.version=4.2
 

+ 21 - 8
src/java/org/apache/hadoop/hdfs/protocol/ClientProtocol.java

@@ -20,6 +20,8 @@ package org.apache.hadoop.hdfs.protocol;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 
+import org.apache.avro.reflect.Nullable;
+
 import org.apache.hadoop.fs.ContentSummary;
 import org.apache.hadoop.fs.CreateFlag;
 import org.apache.hadoop.fs.FileStatus;
@@ -30,6 +32,7 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
+import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.VersionedProtocol;
@@ -81,11 +84,13 @@ public interface ClientProtocol extends VersionedProtocol {
    * @return file length and array of blocks with their locations
    * @throws IOException
    * @throws UnresolvedLinkException if the path contains a symlink.
+   * @throws FileNotFoundException if the path does not exist.
    */
+  @Nullable
   public LocatedBlocks getBlockLocations(String src,
                                          long offset,
                                          long length) 
-    throws IOException, UnresolvedLinkException;
+    throws IOException, UnresolvedLinkException, FileNotFoundException;
 
   /**
    * Get server default values for a number of configuration params.
@@ -125,6 +130,8 @@ public interface ClientProtocol extends VersionedProtocol {
    *                                any quota restriction
    * @throws IOException if other errors occur.
    * @throws UnresolvedLinkException if the path contains a symlink. 
+   * @throws AlreadyBeingCreatedException if the path is already being created.
+   * @throws NSQuotaExceededException if the namespace quota is exceeded.
    */
   public void create(String src, 
                      FsPermission masked,
@@ -133,7 +140,8 @@ public interface ClientProtocol extends VersionedProtocol {
                      boolean createParent,
                      short replication,
                      long blockSize)
-      throws IOException, UnresolvedLinkException;
+    throws IOException, UnresolvedLinkException,
+           AlreadyBeingCreatedException, NSQuotaExceededException;
 
   /**
    * Append to the end of the file. 
@@ -175,10 +183,10 @@ public interface ClientProtocol extends VersionedProtocol {
    * @throws UnresolvedLinkException if the path contains a symlink. 
    */
   public void setPermission(String src, FsPermission permission)
-      throws IOException, UnresolvedLinkException;
+    throws IOException, UnresolvedLinkException, SafeModeException;
 
   /**
-   * Set owner of a path (i.e. a file or a directory).
+   * Set Owner of a path (i.e. a file or a directory).
    * The parameters username and groupname cannot both be null.
    * @param src
    * @param username If it is null, the original username remains unchanged.
@@ -216,10 +224,12 @@ public interface ClientProtocol extends VersionedProtocol {
    * allocated for the current block
    * @return LocatedBlock allocated block information.
    * @throws UnresolvedLinkException if the path contains a symlink. 
+   * @throws DSQuotaExceededException if the directory's quota is exceeded.
    */
   public LocatedBlock addBlock(String src, String clientName,
-      Block previous, DatanodeInfo[] excludedNodes) 
-      throws IOException, UnresolvedLinkException;
+                               @Nullable Block previous,
+                               @Nullable DatanodeInfo[] excludedNodes) 
+    throws IOException, UnresolvedLinkException, DSQuotaExceededException;
 
   /**
    * The client is done writing data to the given filename, and would 
@@ -350,7 +360,7 @@ public interface ClientProtocol extends VersionedProtocol {
    *                                any quota restriction.
    */
   public boolean mkdirs(String src, FsPermission masked, boolean createParent)
-      throws IOException, UnresolvedLinkException;
+    throws IOException, UnresolvedLinkException, NSQuotaExceededException;
 
   /**
    * Get a partial listing of the indicated directory
@@ -525,6 +535,7 @@ public interface ClientProtocol extends VersionedProtocol {
    * @return upgrade status information or null if no upgrades are in progress
    * @throws IOException
    */
+  @Nullable
   public UpgradeStatusReport distributedUpgradeProgress(UpgradeAction action) 
       throws IOException;
 
@@ -552,6 +563,7 @@ public interface ClientProtocol extends VersionedProtocol {
    * @return object containing information regarding the file
    *         or null if file not found
    */
+  @Nullable
   public HdfsFileStatus getFileInfo(String src) 
       throws IOException, UnresolvedLinkException;
 
@@ -595,7 +607,8 @@ public interface ClientProtocol extends VersionedProtocol {
    *                                is greater than the given quota
    */
   public void setQuota(String path, long namespaceQuota, long diskspaceQuota)
-      throws IOException, UnresolvedLinkException;
+    throws IOException, UnresolvedLinkException,
+           FileNotFoundException, SafeModeException;
   
   /**
    * Write all metadata for this file into persistent storage.
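
Two patterns recur in the ClientProtocol changes above: return values and
parameters that may legitimately be null are annotated @Nullable, and the
throws clauses now spell out the concrete checked exceptions, because Avro
reflection builds each message's response schema from the return type and its
error union from the declared exceptions. A minimal sketch of the idea, using
a hypothetical interface rather than anything in this patch:

    import java.io.FileNotFoundException;
    import java.io.IOException;
    import org.apache.avro.reflect.Nullable;

    interface LookupSketch {
      // @Nullable widens the response schema to a union with "null"; the
      // declared exceptions are what the error union is derived from.
      @Nullable
      String lookup(String key) throws IOException, FileNotFoundException;
    }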

+ 2 - 0
src/java/org/apache/hadoop/hdfs/protocol/DSQuotaExceededException.java

@@ -23,6 +23,8 @@ import org.apache.hadoop.util.StringUtils;
 public class DSQuotaExceededException extends QuotaExceededException {
   protected static final long serialVersionUID = 1L;
 
+  public DSQuotaExceededException() {}
+
   public DSQuotaExceededException(String msg) {
     super(msg);
   }
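
The no-argument constructor added here (and repeated below for
NSQuotaExceededException, QuotaExceededException and SafeModeException) lets
the exception be instantiated reflectively when it arrives over the wire, with
its fields filled in afterwards. Roughly the pattern, on a hypothetical class:

    import java.io.IOException;

    public class ExampleException extends IOException {
      public ExampleException() {}              // needed for reflective creation
      public ExampleException(String msg) { super(msg); }
    }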

+ 6 - 2
src/java/org/apache/hadoop/hdfs/protocol/DatanodeInfo.java

@@ -33,6 +33,8 @@ import org.apache.hadoop.net.Node;
 import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.util.StringUtils;
 
+import org.apache.avro.reflect.Nullable;
+
 /** 
  * DatanodeInfo represents the status of a DataNode.
  * This object is used for communication in the
@@ -49,10 +51,12 @@ public class DatanodeInfo extends DatanodeID implements Node {
   /** HostName as supplied by the datanode during registration as its 
    * name. Namenode uses datanode IP address as the name.
    */
+  @Nullable
   protected String hostName = null;
   
   // administrative states of a datanode
   public enum AdminStates {NORMAL, DECOMMISSION_INPROGRESS, DECOMMISSIONED; }
+  @Nullable
   protected AdminStates adminState;
 
 
@@ -285,8 +289,8 @@ public class DatanodeInfo extends DatanodeID implements Node {
     }
   }
 
-  private int level; //which level of the tree the node resides
-  private Node parent; //its parent
+  private transient int level; //which level of the tree the node resides
+  private transient Node parent; //its parent
 
   /** Return this node's parent */
   public Node getParent() { return parent; }
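
In DatanodeInfo, reference fields that can be absent on the wire gain
@Nullable, while the topology bookkeeping (level, parent) becomes transient so
that Avro reflection, which serializes non-transient instance fields, leaves
it out of the record. A small sketch of the effect, assuming Avro 1.3's
ReflectData API and a hypothetical class:

    import org.apache.avro.reflect.Nullable;
    import org.apache.avro.reflect.ReflectData;

    class NodeSketch {
      @Nullable String hostName;    // schema field becomes ["null", "string"]
      long capacity;                // plain "long" field
      transient Object parent;      // omitted from the derived schema
    }

    // e.g. System.out.println(ReflectData.get().getSchema(NodeSketch.class));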

+ 4 - 1
src/java/org/apache/hadoop/hdfs/protocol/HdfsFileStatus.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
 
+import org.apache.avro.reflect.Nullable;
+
 /** Interface that represents the over the wire information for a file.
  */
 public class HdfsFileStatus implements Writable {
@@ -41,7 +43,8 @@ public class HdfsFileStatus implements Writable {
   }
 
   private byte[] path;  // local name of the inode that's encoded in java UTF8
-  private byte[] symlink; // symlink target encoded in java UTF8
+  @Nullable
+  private byte[] symlink; // symlink target encoded in java UTF8 or null
   private long length;
   private boolean isdir;
   private short block_replication;

+ 3 - 0
src/java/org/apache/hadoop/hdfs/protocol/LocatedBlocks.java

@@ -29,6 +29,8 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
 
+import org.apache.avro.reflect.Nullable;
+
 /**
  * Collection of blocks with their locations and the file length.
  */
@@ -36,6 +38,7 @@ public class LocatedBlocks implements Writable {
   private long fileLength;
   private List<LocatedBlock> blocks; // array of blocks with prioritized locations
   private boolean underConstruction;
+  @Nullable
   private LocatedBlock lastLocatedBlock = null;
   private boolean isLastBlockComplete = false;
 

+ 2 - 0
src/java/org/apache/hadoop/hdfs/protocol/NSQuotaExceededException.java

@@ -21,6 +21,8 @@ package org.apache.hadoop.hdfs.protocol;
 public final class NSQuotaExceededException extends QuotaExceededException {
   protected static final long serialVersionUID = 1L;
   
+  public NSQuotaExceededException() {}
+
   public NSQuotaExceededException(String msg) {
     super(msg);
   }

+ 2 - 0
src/java/org/apache/hadoop/hdfs/protocol/QuotaExceededException.java

@@ -38,6 +38,8 @@ public class QuotaExceededException extends IOException {
   protected long quota; // quota
   protected long count; // actual value
   
+  protected QuotaExceededException() {}
+
   protected QuotaExceededException(String msg) {
     super(msg);
   }

+ 2 - 2
src/java/org/apache/hadoop/hdfs/security/BlockAccessKey.java

@@ -35,7 +35,7 @@ public class BlockAccessKey implements Writable {
   private long keyID;
   private Text key;
   private long expiryDate;
-  private Mac mac;
+  private transient Mac mac;
 
   public BlockAccessKey() {
     this(0L, new Text(), 0L);
@@ -107,4 +107,4 @@ public class BlockAccessKey implements Writable {
     key.readFields(in);
     expiryDate = WritableUtils.readVLong(in);
   }
-}
+}

+ 1 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java

@@ -572,7 +572,7 @@ public class DataNode extends Configured
       try {
         // reset name to machineName. Mainly for web interface.
         dnRegistration.name = machineName + ":" + dnRegistration.getPort();
-        dnRegistration = namenode.register(dnRegistration);
+        dnRegistration = namenode.registerDatanode(dnRegistration);
         break;
       } catch(SocketTimeoutException e) {  // namenode is busy
         LOG.info("Problem connecting to server: " + getNameNodeAddr());

+ 7 - 9
src/java/org/apache/hadoop/hdfs/server/namenode/NameNode.java

@@ -66,6 +66,7 @@ import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
 import org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeCommand;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
 import org.apache.hadoop.hdfs.server.protocol.NamenodeRegistration;
 import org.apache.hadoop.hdfs.server.protocol.NamespaceInfo;
 import org.apache.hadoop.hdfs.server.protocol.NodeRegistration;
@@ -128,10 +129,7 @@ import org.apache.hadoop.util.StringUtils;
  * secondary namenodes or rebalancing processes to get partial namenode's
  * state, for example partial blocksMap etc.
  **********************************************************/
-public class NameNode implements ClientProtocol, DatanodeProtocol,
-                                 NamenodeProtocol, FSConstants,
-                                 RefreshAuthorizationPolicyProtocol,
-                                 RefreshUserToGroupMappingsProtocol {
+public class NameNode implements NamenodeProtocols, FSConstants {
   static{
     Configuration.addDefaultResource("hdfs-default.xml");
     Configuration.addDefaultResource("hdfs-site.xml");
@@ -301,10 +299,10 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
     NameNode.initMetrics(conf, this.getRole());
     loadNamesystem(conf);
     // create rpc server 
-    this.server = RPC.getServer(this.getClass(), this, socAddr.getHostName(),
-        socAddr.getPort(), handlerCount, false, conf, namesystem
-            .getDelegationTokenSecretManager());
-
+    this.server = RPC.getServer(NamenodeProtocols.class, this,
+                                socAddr.getHostName(), socAddr.getPort(),
+                                handlerCount, false, conf,
+                                namesystem.getDelegationTokenSecretManager());
     // The rpc-server port can be ephemeral... ensure we have the correct info
     this.rpcAddress = this.server.getListenerAddress(); 
     setRpcServerAddress(conf);
@@ -1051,7 +1049,7 @@ public class NameNode implements ClientProtocol, DatanodeProtocol,
   ////////////////////////////////////////////////////////////////
   /** 
    */
-  public DatanodeRegistration register(DatanodeRegistration nodeReg)
+  public DatanodeRegistration registerDatanode(DatanodeRegistration nodeReg)
       throws IOException {
     verifyVersion(nodeReg.getVersion());
     namesystem.registerDatanode(nodeReg);

+ 2 - 0
src/java/org/apache/hadoop/hdfs/server/namenode/SafeModeException.java

@@ -28,6 +28,8 @@ import java.io.IOException;
 public class SafeModeException extends IOException {
   private static final long serialVersionUID = 1L;
 
+  public SafeModeException() {}
+
   public SafeModeException(String text, FSNamesystem.SafeModeInfo mode ) {
     super(text + ". Name node is in safe mode.\n" + mode.getTurnOffTip());
   }

+ 8 - 0
src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeCommand.java

@@ -22,11 +22,19 @@ import java.io.DataOutput;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactory;
 import org.apache.hadoop.io.WritableFactories;
+import org.apache.avro.reflect.Union;
 
 /**
  * Base class for data-node command.
  * Issued by the name-node to notify data-nodes what should be done.
  */
+
+// Declare subclasses for Avro's denormalized representation
+@Union({Void.class,
+      DatanodeCommand.Register.class, DatanodeCommand.Finalize.class,
+      BlockCommand.class, UpgradeCommand.class,
+      BlockRecoveryCommand.class, KeyUpdateCommand.class})
+
 public abstract class DatanodeCommand extends ServerCommand {
   static class Register extends DatanodeCommand {
     private Register() {super(DatanodeProtocol.DNA_REGISTER);}
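
Avro reflection cannot discover the subclasses of an abstract type on its own,
so the @Union annotation above enumerates the concrete command classes (with
Void standing in for null) from which the union schema is built. The same idea
on a hypothetical hierarchy:

    import org.apache.avro.reflect.Union;

    // References typed as Shape serialize as a union of the listed variants.
    @Union({Void.class, Circle.class, Square.class})
    abstract class Shape {}
    class Circle extends Shape { double radius; }
    class Square extends Shape { double side; }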

+ 6 - 3
src/java/org/apache/hadoop/hdfs/server/protocol/DatanodeProtocol.java

@@ -27,6 +27,8 @@ import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.ipc.VersionedProtocol;
 import org.apache.hadoop.security.KerberosInfo;
 
+import org.apache.avro.reflect.Nullable;
+
 /**********************************************************************
  * Protocol that a DFS datanode uses to communicate with the NameNode.
  * It's used to upload current load information and block reports.
@@ -38,9 +40,9 @@ import org.apache.hadoop.security.KerberosInfo;
 @KerberosInfo(DFSConfigKeys.DFS_NAMENODE_USER_NAME_KEY)
 public interface DatanodeProtocol extends VersionedProtocol {
   /**
-   * 23: nextGenerationStamp() removed.
+   * 24: register() renamed registerDatanode()
    */
-  public static final long versionID = 23L;
+  public static final long versionID = 24L;
   
   // error code
   final static int NOTIFY = 0;
@@ -71,7 +73,7 @@ public interface DatanodeProtocol extends VersionedProtocol {
    * new storageID if the datanode did not have one and
    * registration ID for further communication.
    */
-  public DatanodeRegistration register(DatanodeRegistration registration
+  public DatanodeRegistration registerDatanode(DatanodeRegistration registration
                                        ) throws IOException;
   /**
    * sendHeartbeat() tells the NameNode that the DataNode is still
@@ -81,6 +83,7 @@ public interface DatanodeProtocol extends VersionedProtocol {
    * A DatanodeCommand tells the DataNode to invalidate local block(s), 
    * or to copy them to other DataNodes, etc.
    */
+  @Nullable
   public DatanodeCommand[] sendHeartbeat(DatanodeRegistration registration,
                                        long capacity,
                                        long dfsUsed, long remaining,

+ 32 - 0
src/java/org/apache/hadoop/hdfs/server/protocol/NamenodeProtocols.java

@@ -0,0 +1,32 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.hdfs.server.protocol;
+
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
+
+/** The full set of RPC methods implemented by the Namenode.  */
+public interface NamenodeProtocols
+  extends ClientProtocol,
+          DatanodeProtocol,
+          NamenodeProtocol,
+          RefreshAuthorizationPolicyProtocol,
+          RefreshUserToGroupMappingsProtocol {
+}
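
Folding the five server-side interfaces into NamenodeProtocols gives the RPC
layer a single Java interface to reflect over, which is why NameNode now
registers its server against NamenodeProtocols.class above. As a quick sanity
check (assuming Avro 1.3's ReflectData API), the full protocol description can
be derived from the aggregate interface:

    import org.apache.avro.Protocol;
    import org.apache.avro.reflect.ReflectData;
    import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;

    Protocol p = ReflectData.get().getProtocol(NamenodeProtocols.class);
    System.out.println(p.toString(true));   // pretty-printed JSON protocol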

+ 33 - 0
src/test/hdfs/org/apache/hadoop/hdfs/MiniDFSCluster.java

@@ -35,6 +35,13 @@ import org.apache.hadoop.hdfs.protocol.Block;
 import org.apache.hadoop.hdfs.protocol.BlockListAsLongs;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocols;
+import org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol;
+import org.apache.hadoop.security.RefreshUserToGroupMappingsProtocol;
+import org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
 import static org.apache.hadoop.hdfs.server.common.Util.fileAsURI;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
@@ -243,6 +250,28 @@ public class MiniDFSCluster {
     base_dir = new File(getBaseDirectory());
     data_dir = new File(base_dir, "data");
     
+    // use alternate RPC engine if spec'd
+    String rpcEngineName = System.getProperty("hdfs.rpc.engine");
+    if (rpcEngineName != null && !"".equals(rpcEngineName)) {
+      
+      System.out.println("HDFS using RPCEngine: "+rpcEngineName);
+      try {
+        Class rpcEngine = conf.getClassByName(rpcEngineName);
+        setRpcEngine(conf, NamenodeProtocols.class, rpcEngine);
+        setRpcEngine(conf, NamenodeProtocol.class, rpcEngine);
+        setRpcEngine(conf, ClientProtocol.class, rpcEngine);
+        setRpcEngine(conf, DatanodeProtocol.class, rpcEngine);
+        setRpcEngine(conf, RefreshAuthorizationPolicyProtocol.class, rpcEngine);
+        setRpcEngine(conf, RefreshUserToGroupMappingsProtocol.class, rpcEngine);
+      } catch (ClassNotFoundException e) {
+        throw new RuntimeException(e);
+      }
+
+      // disable service authorization, as it does not work with tunnelled RPC
+      conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
+                      false);
+    }
+
     // Setup the NameNode configuration
     FileSystem.setDefaultUri(conf, "hdfs://localhost:"+ Integer.toString(nameNodePort));
     conf.set(DFSConfigKeys.DFS_NAMENODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");  
@@ -289,6 +318,10 @@ public class MiniDFSCluster {
     }
   }
   
+  private void setRpcEngine(Configuration conf, Class protocol, Class engine) {
+    conf.setClass("rpc.engine."+protocol.getName(), engine, Object.class);
+  }
+
   /**
    * 
    * @return URI of this MiniDFSCluster

+ 33 - 0
src/test/hdfs/org/apache/hadoop/hdfs/TestDfsOverAvroRpc.java

@@ -0,0 +1,33 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.hdfs;
+
+import java.io.IOException;
+
+/** Test for simple signs of life using Avro RPC.  Not an exhaustive test
+ * yet, just enough to catch fundamental problems using Avro reflection to
+ * infer namenode RPC protocols. */
+public class TestDfsOverAvroRpc extends TestLocalDFS {
+
+  public void testWorkingDirectory() throws IOException {
+    System.setProperty("hdfs.rpc.engine",
+                       "org.apache.hadoop.ipc.AvroRpcEngine");
+    super.testWorkingDirectory();
+  }
+
+}
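
The new test simply reruns TestLocalDFS with the Avro engine selected through
the hdfs.rpc.engine system property that MiniDFSCluster now honours; the same
selection can be applied to a whole Ant test run by overriding
test.hdfs.rpc.engine, which build.xml forwards to the test JVM.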

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/NNThroughputBenchmark.java

@@ -757,7 +757,7 @@ public class NNThroughputBenchmark {
       dnRegistration.setStorageInfo(new DataStorage(nsInfo, ""));
       DataNode.setNewStorageID(dnRegistration);
       // register datanode
-      dnRegistration = nameNode.register(dnRegistration);
+      dnRegistration = nameNode.registerDatanode(dnRegistration);
     }
 
     /**