
NFS: support starting portmap from hadoop script, more NPE fix

brandonli 12 years ago
parent
commit
75504faf57

+ 4 - 0
bin/hadoop

@@ -77,6 +77,7 @@ print_usage()
   echo "  namenode             run the DFS namenode"
   echo "  datanode             run a DFS datanode"
   echo "  nfs3                 run an NFS version 3 gateway"
+  echo "  portmap              run a portmap service"
   echo "  dfsadmin             run a DFS admin client"
   echo "  mradmin              run a Map-Reduce admin client"
   echo "  fsck                 run a DFS filesystem checking utility"
@@ -282,6 +283,9 @@ elif [ "$COMMAND" = "datanode" ] ; then
 elif [ "$COMMAND" = "nfs3" ] ; then
   CLASS=org.apache.hadoop.hdfs.nfs.nfs3.Nfs3
   HADOOP_OPTS=${HADOOP_OPTS/-server/}
+elif [ "$COMMAND" = "portmap" ] ; then
+  CLASS=org.apache.hadoop.portmap.Portmap
+  HADOOP_OPTS=${HADOOP_OPTS/-server/}
 elif [ "$COMMAND" = "fs" ] ; then
   CLASS=org.apache.hadoop.fs.FsShell
   HADOOP_OPTS="$HADOOP_OPTS $HADOOP_CLIENT_OPTS"

+ 1 - 1
src/core/org/apache/hadoop/nfs/nfs3/response/FSINFO3Response.java

@@ -88,7 +88,7 @@ public class FSINFO3Response extends NFS3Response {
   private final int properties;
 
   public FSINFO3Response(int status) {
-    this(status, null, 0, 0, 0, 0, 0, 0, 0, 0, null, 0);
+    this(status, new Nfs3FileAttributes(), 0, 0, 0, 0, 0, 0, 0, 0, null, 0);
   }
 
   public FSINFO3Response(int status, Nfs3FileAttributes postOpAttr, int rtmax,
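
The one-line change above replaces the null post-op attribute passed by the status-only constructor with an empty Nfs3FileAttributes. A minimal sketch of the path this protects, assuming (as the commit message's "NPE fix" suggests) that serializing the reply dereferences postOpAttr; the status constant is assumed from the Nfs3Status class:

    // Error reply built from a status code alone:
    FSINFO3Response reply = new FSINFO3Response(Nfs3Status.NFS3ERR_IO);
    // Previously postOpAttr was null here, so writing the reply could throw a
    // NullPointerException; an empty Nfs3FileAttributes now serializes as
    // default attributes instead.
    reply.send(out, xid);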

+ 4 - 1
src/core/org/apache/hadoop/nfs/nfs3/response/REMOVE3Response.java

@@ -23,7 +23,7 @@ import org.apache.hadoop.oncrpc.XDR;
  * REMOVE3 Response
  */
 public class REMOVE3Response extends NFS3Response {
-  private final WccData dirWcc;
+  private WccData dirWcc;
 
   public REMOVE3Response(int status) {
     this(status, null);
@@ -37,6 +37,9 @@ public class REMOVE3Response extends NFS3Response {
   @Override
   public XDR send(XDR out, int xid) {
     super.send(out, xid);
+    if (dirWcc == null) {
+      dirWcc = new WccData(null, null);
+    }
     dirWcc.serialize(out);
     return out;
   }
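
Replies built through the single-argument constructor reach send() with dirWcc still null, since that constructor delegates to this(status, null). A short sketch of the case the new null check covers, using only the types shown in this diff (the status constant is assumed from Nfs3Status):

    // Error reply for a nonexistent entry; dirWcc is left null by the constructor:
    REMOVE3Response reply = new REMOVE3Response(Nfs3Status.NFS3ERR_NOENT);
    // dirWcc.serialize(out) used to throw a NullPointerException here; an empty
    // WccData(null, null) is now substituted before serialization.
    reply.send(out, xid);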

+ 4 - 2
src/core/org/apache/hadoop/oncrpc/RpcProgram.java

@@ -141,8 +141,10 @@ public abstract class RpcProgram {
     
     XDR response = handleInternal(rpcCall, xdr, out, client, channel);
     if (response.size() == 0) {
-      LOG.warn("No sync response, expect an async response for request XID="
-          + rpcCall.getXid());
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("No sync response, expect an async response for request XID="
+            + rpcCall.getXid());
+      }
     }
     
     // Add the request to the cache

+ 4 - 0
src/hdfs/org/apache/hadoop/hdfs/nfs/nfs3/RpcProgramNfs3.java

@@ -488,6 +488,10 @@ public class RpcProgramNfs3 extends RpcProgram implements Nfs3Interface {
         LOG.info("Partical read. Asked offset:" + offset + " count:" + count
             + " and read back:" + readCount + "file size:" + attrs.getSize());
       }
+      // HDFS returns -1 for read beyond file size.
+      if (readCount < 0) {
+        readCount = 0;
+      }
       eof = (offset + readCount) < attrs.getSize() ? false : true;
       return new READ3Response(Nfs3Status.NFS3_OK, attrs, readCount, eof,
           readbuffer);
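
The clamp follows the contract of HDFS positional reads, which return -1 rather than 0 once the requested offset is at or past the end of the file. A hedged sketch of the surrounding read path (the stream variable fis and the exact read call are assumptions, not part of this hunk):

    // Positional read: returns -1 when offset >= file size.
    int readCount = fis.read(offset, readbuffer, 0, count);
    if (readCount < 0) {
      // An NFS3 READ reply carries a non-negative byte count; report 0 bytes
      // and let the eof flag computed afterwards mark end of file.
      readCount = 0;
    }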

+ 2 - 2
src/hdfs/org/apache/hadoop/hdfs/nfs/nfs3/WriteManager.java

@@ -96,8 +96,8 @@ public class WriteManager {
 
     FileHandle handle = request.getHandle();
     if (LOG.isDebugEnabled()) {
-      LOG.debug("handleWrite handle: " + handle + " offset: " + offset
-          + " length:" + count + " stableHow:" + stableHow.getValue());
+      LOG.debug("handleWrite fileId: " + handle.getFileId() + " offset: "
+          + offset + " length:" + count + " stableHow:" + stableHow.getValue());
     }
 
     // Check if there is a stream to write