
Merging changes r1086461:r1087160 from trunk to federation


git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1089696 13f79535-47bb-0310-9956-ffa450edef68
Suresh Srinivas, 14 years ago
parent commit: c8906bde45

+ 23 - 7
CHANGES.txt

@@ -251,10 +251,6 @@ Trunk (unreleased changes)
     HDFS-1791. Federation: Add command to delete block pool directories 
     from a datanode. (jitendra)
 
-    HDFS-1785. In BlockReceiver and DataXceiver, clientName.length() is used
-    multiple times for determining whether the source is a client or a
-    datanode.  (szetszwo)
-
   IMPROVEMENTS
 
     HDFS-1510. Added test-patch.properties required by test-patch.sh (nigel)
@@ -300,9 +296,6 @@ Trunk (unreleased changes)
     HDFS-1736. Remove the dependency from DatanodeJspHelper to FsShell.
     (Daryn Sharp via szetszwo)
     
-    HDFS-1731. Amend previous commit for this JIRA to fix build on Cygwin.
-    (todd)
-
     HDFS-780. Revive TestFuseDFS. (eli)
 
     HDFS-1445. Batch the calls in DataStorage to FileUtil.createHardLink().
@@ -317,6 +310,16 @@ Trunk (unreleased changes)
     HDFS-1120. Make DataNode's block-to-device placement policy pluggable
     (Harsh J Chouraria via todd)
 
+    HDFS-1785. In BlockReceiver and DataXceiver, clientName.length() is used
+    multiple times for determining whether the source is a client or a
+    datanode.  (szetszwo)
+
+    HDFS-1789. Refactor frequently used codes from DFSOutputStream and
+    DataXceiver.  (szetszwo)
+
+    HDFS-1767. Namenode ignores non-initial block report from datanodes
+    when in safemode during startup. (Matt Foley via suresh)
+
   OPTIMIZATIONS
 
     HDFS-1458. Improve checkpoint performance by avoiding unnecessary image
@@ -379,6 +382,11 @@ Trunk (unreleased changes)
 
     HDFS-1770. TestFiRename fails due to invalid block size. (eli)
 
+    HDFS-1797. Fix new findbugs warning introduced by HDFS-1120 (todd)
+
+    HDFS-1611. Fix up some log messages in DFSClient and MBean registration
+    (Uma Maheswara Rao G via todd)
+
 Release 0.22.0 - Unreleased
 
   NEW FEATURES
@@ -832,6 +840,11 @@ Release 0.22.0 - Unreleased
     HDFS-1001. DataXceiver and BlockReader disagree on when to send/recv
     CHECKSUM_OK. (bc Wong via eli)
 
+    HDFS-1781. Fix the path for jsvc in bin/hdfs.  (John George via szetszwo)
+
+    HDFS-1782. Fix an NPE in FSNamesystem.startFileInternal(..).
+    (John George via szetszwo)
+
 Release 0.21.1 - Unreleased
 
     HDFS-1411. Correct backup node startup command in hdfs user guide.
@@ -885,6 +898,9 @@ Release 0.21.1 - Unreleased
     HDFS-1596. Replace fs.checkpoint.* with dfs.namenode.checkpoint.*
     in documentations.  (Harsh J Chouraria via szetszwo)
 
+    HDFS-1786. Some cli test cases expect a "null" message
+    (Uma Maheswara Rao G via todd)
+
 Release 0.21.0 - 2010-08-13
 
   INCOMPATIBLE CHANGES

+ 1 - 1
bin/hdfs

@@ -143,7 +143,7 @@ if [ "$starting_secure_dn" = "true" ]; then
    HADOOP_SECURE_DN_PID="$HADOOP_PID_DIR/hadoop_secure_dn.pid"
   fi
 
-  exec "$HADOOP_HOME/bin/jsvc" \
+  exec "$HADOOP_HDFS_HOME/bin/jsvc" \
            -Dproc_$COMMAND -outfile "$HADOOP_LOG_DIR/jsvc.out" \
            -errfile "$HADOOP_LOG_DIR/jsvc.err" \
            -pidfile "$HADOOP_SECURE_DN_PID" \

+ 1 - 1
src/java/org/apache/hadoop/hdfs/DFSClient.java

@@ -192,7 +192,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     InetSocketAddress addr = NetUtils.createSocketAddr(
       datanodeid.getHost() + ":" + datanodeid.getIpcPort());
     if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
-      ClientDatanodeProtocol.LOG.info("ClientDatanodeProtocol addr=" + addr);
+      ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
     }
     UserGroupInformation ticket = UserGroupInformation
         .createRemoteUser(locatedBlock.getBlock().getLocalBlock().toString());
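
The one-line fix above matches the log statement to its guard: the surrounding check is isDebugEnabled(), so logging at INFO both defeated the guard's purpose and recorded the message at the wrong level. A minimal, self-contained sketch of the guarded-logging pattern (using java.util.logging purely for illustration; HDFS itself uses commons-logging):

    import java.util.logging.Level;
    import java.util.logging.Logger;

    public class GuardedLogging {
      private static final Logger LOG =
          Logger.getLogger(GuardedLogging.class.getName());

      public static void main(String[] args) {
        String addr = "127.0.0.1:50020";
        // The guard skips message construction when the level is disabled;
        // the call inside must use the same level the guard tests.
        if (LOG.isLoggable(Level.FINE)) {
          LOG.fine("ClientDatanodeProtocol addr=" + addr);
        }
      }
    }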

+ 31 - 19
src/java/org/apache/hadoop/hdfs/DFSOutputStream.java

@@ -24,8 +24,8 @@ import java.io.BufferedOutputStream;
 import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.FileNotFoundException;
-import java.io.InterruptedIOException;
 import java.io.IOException;
+import java.io.InterruptedIOException;
 import java.net.InetSocketAddress;
 import java.net.Socket;
 import java.nio.BufferOverflowException;
@@ -46,21 +46,23 @@ import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.protocol.ClientProtocol;
 import org.apache.hadoop.hdfs.protocol.DSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
 import org.apache.hadoop.hdfs.protocol.HdfsFileStatus;
 import org.apache.hadoop.hdfs.protocol.LocatedBlock;
 import org.apache.hadoop.hdfs.protocol.NSQuotaExceededException;
 import org.apache.hadoop.hdfs.protocol.UnresolvedPathException;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.BlockConstructionStage;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PacketHeader;
-import org.apache.hadoop.hdfs.protocol.DataTransferProtocol.PipelineAck;
+import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
+import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
 import org.apache.hadoop.hdfs.server.namenode.SafeModeException;
 import org.apache.hadoop.io.EnumSetWritable;
-import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
@@ -70,8 +72,6 @@ import org.apache.hadoop.util.DataChecksum;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.PureJavaCrc32;
 import org.apache.hadoop.util.StringUtils;
-import org.apache.hadoop.hdfs.security.token.block.InvalidBlockTokenException;
-import org.apache.hadoop.hdfs.security.token.block.BlockTokenIdentifier;
 
 /****************************************************************
  * DFSOutputStream creates files from a stream of bytes.
@@ -888,18 +888,7 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
 
       boolean result = false;
       try {
-        if(DFSClient.LOG.isDebugEnabled()) {
-          DFSClient.LOG.debug("Connecting to " + nodes[0].getName());
-        }
-        InetSocketAddress target = NetUtils.createSocketAddr(nodes[0].getName());
-        s = dfsClient.socketFactory.createSocket();
-        int timeoutValue = dfsClient.getDatanodeReadTimeout(nodes.length);
-        NetUtils.connect(s, target, timeoutValue);
-        s.setSoTimeout(timeoutValue);
-        s.setSendBufferSize(DFSClient.DEFAULT_DATA_SOCKET_SIZE);
-        if(DFSClient.LOG.isDebugEnabled()) {
-          DFSClient.LOG.debug("Send buf size " + s.getSendBufferSize());
-        }
+        s = createSocketForPipeline(nodes, dfsClient);
         long writeTimeout = dfsClient.getDatanodeWriteTimeout(nodes.length);
 
         //
@@ -1035,6 +1024,29 @@ class DFSOutputStream extends FSOutputSummer implements Syncable {
     }
   }
 
+  /**
+   * Create a socket for a write pipeline
+   * @param datanodes the datanodes on the pipeline 
+   * @param client
+   * @return the socket connected to the first datanode
+   */
+  static Socket createSocketForPipeline(final DatanodeInfo[] datanodes,
+      final DFSClient client) throws IOException {
+    if(DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Connecting to datanode " + datanodes[0].getName());
+    }
+    final InetSocketAddress isa = NetUtils.createSocketAddr(datanodes[0].getName());
+    final Socket sock = client.socketFactory.createSocket();
+    final int timeout = client.getDatanodeReadTimeout(datanodes.length);
+    NetUtils.connect(sock, isa, timeout);
+    sock.setSoTimeout(timeout);
+    sock.setSendBufferSize(DFSClient.DEFAULT_DATA_SOCKET_SIZE);
+    if(DFSClient.LOG.isDebugEnabled()) {
+      DFSClient.LOG.debug("Send buf size " + sock.getSendBufferSize());
+    }
+    return sock;
+  }
+
   private void isClosed() throws IOException {
     if (closed) {
       IOException e = lastException;
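
The refactoring above (HDFS-1789) lifts the inline connect sequence into a reusable createSocketForPipeline helper, so the pipeline setup path shrinks to a single call. A scaled-down, self-contained analogue of the same connect-with-scaled-timeout pattern, with assumed timeout constants and a hypothetical datanode address (not the HDFS implementation):

    import java.io.IOException;
    import java.net.InetSocketAddress;
    import java.net.Socket;

    public class PipelineSocketSketch {
      /** Connect to the first node; timeouts grow with pipeline length. */
      static Socket connectToFirst(InetSocketAddress[] nodes, int baseTimeoutMs,
          int perNodeTimeoutMs, int sendBufBytes) throws IOException {
        Socket sock = new Socket();
        int timeout = baseTimeoutMs + perNodeTimeoutMs * nodes.length;
        sock.connect(nodes[0], timeout);   // connect timeout
        sock.setSoTimeout(timeout);        // read timeout for pipeline replies
        sock.setSendBufferSize(sendBufBytes);
        return sock;
      }

      public static void main(String[] args) throws IOException {
        InetSocketAddress[] nodes =
            { new InetSocketAddress("127.0.0.1", 50010) };  // assumed address
        try (Socket s = connectToFirst(nodes, 60_000, 5_000, 128 * 1024)) {
          System.out.println("send buf size = " + s.getSendBufferSize());
        }
      }
    }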

+ 48 - 68
src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -153,24 +153,9 @@ class DataXceiver extends DataTransferProtocol.Receiver
         datanode.socketWriteTimeout);
     DataOutputStream out = new DataOutputStream(
                  new BufferedOutputStream(baseStream, SMALL_BUFFER_SIZE));
-
-    if (datanode.isBlockTokenEnabled) {
-      try {
-        datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null, block,
-            BlockTokenSecretManager.AccessMode.READ);
-      } catch (InvalidToken e) {
-        try {
-          ERROR_ACCESS_TOKEN.write(out);
-          out.flush();
-          LOG.warn("Block token verification failed, for client "
-              + remoteAddress + " for OP_READ_BLOCK for block " + block + " : "
-              + e.getLocalizedMessage());
-          throw e;
-        } finally {
-          IOUtils.closeStream(out);
-        }
-      }
-    }
+    checkAccess(out, block, blockToken,
+        DataTransferProtocol.Op.READ_BLOCK,
+        BlockTokenSecretManager.AccessMode.READ);
   
     // send the block
     BlockSender blockSender = null;
@@ -250,32 +235,14 @@ class DataXceiver extends DataTransferProtocol.Receiver
              " src: " + remoteAddress +
              " dest: " + localAddress);
 
-    DataOutputStream replyOut = null;   // stream to prev target
-    replyOut = new DataOutputStream(new BufferedOutputStream(
-                   NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
-                   SMALL_BUFFER_SIZE));
-    DatanodeRegistration dnR = 
-      datanode.getDNRegistrationForBP(block.getBlockPoolId());
-    if (datanode.isBlockTokenEnabled) {
-      try {
-        datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null, block,
-            BlockTokenSecretManager.AccessMode.WRITE);
-      } catch (InvalidToken e) {
-        try {
-          if (isClient) {
-            ERROR_ACCESS_TOKEN.write(replyOut);
-            Text.writeString(replyOut, dnR.getName());
-            replyOut.flush();
-          }
-          LOG.warn("Block token verification failed, for client "
-              + remoteAddress + " for OP_WRITE_BLOCK for block " + block
-              + " : " + e.getLocalizedMessage());
-          throw e;
-        } finally {
-          IOUtils.closeStream(replyOut);
-        }
-      }
-    }
+    // reply to upstream datanode or client 
+    final DataOutputStream replyOut = new DataOutputStream(
+        new BufferedOutputStream(
+            NetUtils.getOutputStream(s, datanode.socketWriteTimeout),
+            SMALL_BUFFER_SIZE));
+    checkAccess(isClient? replyOut: null, block, blockToken,
+        DataTransferProtocol.Op.WRITE_BLOCK,
+        BlockTokenSecretManager.AccessMode.WRITE);
 
     DataOutputStream mirrorOut = null;  // stream to next target
     DataInputStream mirrorIn = null;    // reply from next target
@@ -298,8 +265,7 @@ class DataXceiver extends DataTransferProtocol.Receiver
       }
 
       //
-      // Open network conn to backup machine, if 
-      // appropriate
+      // Connect to downstream machine, if appropriate
       //
       if (targets.length > 0) {
         InetSocketAddress mirrorTarget = null;
@@ -321,7 +287,6 @@ class DataXceiver extends DataTransferProtocol.Receiver
                          SMALL_BUFFER_SIZE));
           mirrorIn = new DataInputStream(NetUtils.getInputStream(mirrorSock));
 
-          // Write header: Copied from DFSClient.java!
           DataTransferProtocol.Sender.opWriteBlock(mirrorOut, originalBlock,
               pipelineSize, stage, newGs, minBytesRcvd, maxBytesRcvd, clientname,
               srcDataNode, targets, blockToken);
@@ -358,7 +323,7 @@ class DataXceiver extends DataTransferProtocol.Receiver
           if (isClient) {
             throw e;
           } else {
-            LOG.info(dnR + ":Exception transfering block " +
+            LOG.info(datanode + ":Exception transfering block " +
                      block + " to mirror " + mirrorNode +
                      ". continuing without the mirror.\n" +
                      StringUtils.stringifyException(e));
@@ -430,26 +395,11 @@ class DataXceiver extends DataTransferProtocol.Receiver
   @Override
   protected void opBlockChecksum(DataInputStream in, ExtendedBlock block,
       Token<BlockTokenIdentifier> blockToken) throws IOException {
-    DataOutputStream out = new DataOutputStream(NetUtils.getOutputStream(s,
-        datanode.socketWriteTimeout));
-    if (datanode.isBlockTokenEnabled) {
-      try {
-        datanode.blockPoolTokenSecretManager.checkAccess(blockToken, null,
-            block, BlockTokenSecretManager.AccessMode.READ);
-      } catch (InvalidToken e) {
-        try {
-          ERROR_ACCESS_TOKEN.write(out);
-          out.flush();
-          LOG.warn("Block token verification failed, for client "
-              + remoteAddress + " for OP_BLOCK_CHECKSUM for block " + block
-              + " : " + e.getLocalizedMessage());
-          throw e;
-        } finally {
-          IOUtils.closeStream(out);
-        }
-      }
-    }
-
+    final DataOutputStream out = new DataOutputStream(
+        NetUtils.getOutputStream(s, datanode.socketWriteTimeout));
+    checkAccess(out, block, blockToken,
+        DataTransferProtocol.Op.BLOCK_CHECKSUM,
+        BlockTokenSecretManager.AccessMode.READ);
     updateCurrentThreadName("Reading metadata for block " + block);
     final MetaDataInputStream metadataIn = 
       datanode.data.getMetaDataInputStream(block);
@@ -703,4 +653,34 @@ class DataXceiver extends DataTransferProtocol.Receiver
       IOUtils.closeStream(reply);
     }
   }
+
+  private void checkAccess(final DataOutputStream out, 
+      final ExtendedBlock blk,
+      final Token<BlockTokenIdentifier> t,
+      final DataTransferProtocol.Op op,
+      final BlockTokenSecretManager.AccessMode mode) throws IOException {
+    if (datanode.isBlockTokenEnabled) {
+      try {
+        datanode.blockPoolTokenSecretManager.checkAccess(t, null, blk, mode);
+      } catch(InvalidToken e) {
+        try {
+          if (out != null) {
+            ERROR_ACCESS_TOKEN.write(out);
+            if (mode == BlockTokenSecretManager.AccessMode.WRITE) {
+              DatanodeRegistration dnR = 
+                datanode.getDNRegistrationForBP(blk.getBlockPoolId());
+              Text.writeString(out, dnR.getName());
+            }
+            out.flush();
+          }
+          LOG.warn("Block token verification failed: op=" + op
+              + ", remoteAddress=" + remoteAddress
+              + ", message=" + e.getLocalizedMessage());
+          throw e;
+        } finally {
+          IOUtils.closeStream(out);
+        }
+      }
+    }
+  }
 }
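
The three near-identical token-verification blocks in opReadBlock, opWriteBlock, and opBlockChecksum collapse into the single checkAccess helper above. The behavioral differences are carried by parameters: a null output stream suppresses the error reply (used when the writer is another datanode rather than a client), and WRITE mode appends the datanode registration name to the reply. A minimal sketch of that consolidate-with-nullable-reply shape, with hypothetical names:

    import java.io.IOException;
    import java.io.PrintStream;

    public class AccessCheckSketch {
      enum Mode { READ, WRITE }

      /** Hypothetical analogue of DataXceiver.checkAccess: verify a token
       *  and, on failure, optionally report to the peer before rethrowing. */
      static void checkAccess(PrintStream replyOut, String token, Mode mode)
          throws IOException {
        if (!"valid".equals(token)) {        // stand-in for real verification
          if (replyOut != null) {            // null: peer gets no error reply
            replyOut.println("ERROR_ACCESS_TOKEN");
            if (mode == Mode.WRITE) {
              replyOut.println("dn-registration-name"); // extra field on writes
            }
            replyOut.flush();
          }
          throw new IOException("Block token verification failed: mode=" + mode);
        }
      }

      public static void main(String[] args) {
        try {
          checkAccess(System.out, "bogus", Mode.WRITE);
        } catch (IOException e) {
          System.err.println(e.getMessage());
        }
      }
    }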

+ 21 - 2
src/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java

@@ -1410,7 +1410,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
                 ". Lease recovery is in progress. Try again later.");
 
         } else {
-          if(pendingFile.getLastBlock().getBlockUCState() ==
+          BlockInfoUnderConstruction lastBlock=pendingFile.getLastBlock();
+          if(lastBlock != null && lastBlock.getBlockUCState() ==
             BlockUCState.UNDER_RECOVERY) {
             throw new RecoveryInProgressException(
               "Recovery in progress, file [" + src + "], " +
@@ -3144,6 +3145,14 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
       throw new IOException("ProcessReport from dead or unregisterted node: "
                             + nodeID.getName());
     }
+    // To minimize startup time, we discard any second (or later) block reports
+    // that we receive while still in startup phase.
+    if (isInStartupSafeMode() && node.numBlocks() > 0) {
+      NameNode.stateChangeLog.info("BLOCK* NameSystem.processReport: "
+          + "discarded non-initial block report from " + nodeID.getName()
+          + " because namenode still in startup phase");
+      return;
+    }
 
     blockManager.processReport(node, newReport);
     NameNode.getNameNodeMetrics().blockReport.inc((int) (now() - startTime));
@@ -4221,6 +4230,15 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
       return false;
     return safeMode.isOn();
   }
+  
+  /**
+   * Check whether the name node is in startup mode.
+   */
+  synchronized boolean isInStartupSafeMode() {
+    if (safeMode == null)
+      return false;
+    return safeMode.isOn() && !safeMode.isManual();
+  }
 
   /**
    * Check whether replication queues are populated.
@@ -4594,7 +4612,8 @@ public class FSNamesystem implements FSConstants, FSNamesystemMBean, FSClusterSt
       bean = new StandardMBean(this,FSNamesystemMBean.class);
       mbeanName = MBeanUtil.registerMBean("NameNode", "FSNamesystemState", bean);
     } catch (NotCompliantMBeanException e) {
-      e.printStackTrace();
+      LOG.warn("Exception in initializing StandardMBean as FSNamesystemMBean",
+	  e);
     }
 
     LOG.info("Registered FSNamesystemStatusMBean");

+ 23 - 19
src/test/hdfs/org/apache/hadoop/cli/testHDFSConf.xml

@@ -5024,8 +5024,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>get: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>get: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5040,8 +5040,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>get: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>get: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5056,8 +5056,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>get: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>get: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5072,8 +5072,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>get: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>get: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5459,8 +5459,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>copyToLocal: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>copyToLocal: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5475,8 +5475,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>copyToLocal: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>copyToLocal: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5491,8 +5491,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>copyToLocal: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>copyToLocal: .*: No such file or directory</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -5507,8 +5507,8 @@
       </cleanup-commands>
       <comparators>
         <comparator>
-          <type>TokenComparator</type>
-          <expected-output>copyToLocal: null</expected-output>
+          <type>RegexpComparator</type>
+          <expected-output>copyToLocal: .*: No such file or directory</expected-output>
         </comparator>
      </comparators>
     </test>
@@ -15593,11 +15593,15 @@
       <comparators>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^-touchz &lt;path&gt;: Write a timestamp in yyyy-MM-dd HH:mm:ss format( )*</expected-output>
+          <expected-output>^-touchz &lt;path&gt;: Creates a file of zero length( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*at &lt;path&gt; with current time as the timestamp of that &lt;path&gt;.( )*</expected-output>
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*in a file at &lt;path&gt;. An error is returned if the file exists with non-zero length( )*</expected-output>
+          <expected-output>^( |\t)* An error is returned if the file exists with non-zero length( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -16927,7 +16931,7 @@
         <command>-fs NAMENODE -mkdir /test </command>
         <command>-fs NAMENODE -touchz /test/file1 </command>
         <dfs-admin-command>-fs NAMENODE -safemode enter </dfs-admin-command>
-        <command>-fs NAMENODE -chown root /test/file1 </command>
+        <command>-fs NAMENODE -chown newgroup /test/file1 </command>
       </test-commands>
       <cleanup-commands>
         <dfs-admin-command>-fs NAMENODE -safemode leave </dfs-admin-command>