
svn merge -c 1303628 from trunk for HDFS-3100.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1303629 13f79535-47bb-0310-9956-ffa450edef68
Tsz-wo Sze 13 years ago
parent
commit
0b8249ac09

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -234,6 +234,9 @@ Release 0.23.3 - UNRELEASED
     HDFS-3083. Cannot run an MR job with HA and security enabled when
     second-listed NN active. (atm)
 
+    HDFS-3100. In BlockSender, throw an exception when it needs to verify
+    checksum but the meta data does not exist.  (Brandon Li via szetszwo)
+
   BREAKDOWN OF HDFS-1623 SUBTASKS
 
     HDFS-2179. Add fencing framework and mechanisms for NameNode HA. (todd)

+ 13 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/BlockSender.java

@@ -22,6 +22,7 @@ import java.io.DataInputStream;
 import java.io.DataOutputStream;
 import java.io.FileDescriptor;
 import java.io.FileInputStream;
+import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -218,9 +219,21 @@ class BlockSender implements java.io.Closeable {
       this.transferToAllowed = datanode.getDnConf().transferToAllowed &&
         (!is32Bit || length <= Integer.MAX_VALUE);
 
+      /*
+       * (corruptChecksumOk, meta file exists): operation
+       * true,  true : verify checksum
+       * true,  false: skip verification, e.g. when reading a corrupted file
+       * false, true : verify checksum
+       * false, false: throw FileNotFoundException (meta file not found)
+       */
       DataChecksum csum;
       final InputStream metaIn = datanode.data.getMetaDataInputStream(block);
       if (!corruptChecksumOk || metaIn != null) {
+        if (metaIn == null) {
+          // checksum is required but the meta file does not exist
+          throw new FileNotFoundException("Meta-data not found for " + block);
+        }
+
         checksumIn = new DataInputStream(
             new BufferedInputStream(metaIn, HdfsConstants.IO_FILE_BUFFER_SIZE));
 

+ 26 - 3
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/AppendTestUtil.java

@@ -18,17 +18,21 @@
 package org.apache.hadoop.hdfs;
 
 import java.io.IOException;
-import java.io.InputStream;
 import java.io.OutputStream;
 import java.util.Random;
 
-import junit.framework.TestCase;
 import junit.framework.Assert;
+import junit.framework.TestCase;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.*;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 
 /** Utilities for append-related tests */ 
@@ -176,4 +180,23 @@ public class AppendTestUtil {
       actual[idx] = 0;
     }
   }
+
+  public static void testAppend(FileSystem fs, Path p) throws IOException {
+    final byte[] bytes = new byte[1000];
+
+    { //create file
+      final FSDataOutputStream out = fs.create(p, (short)1);
+      out.write(bytes);
+      out.close();
+      Assert.assertEquals(bytes.length, fs.getFileStatus(p).getLen());
+    }
+
+    for(int i = 2; i < 500; i++) {
+      //append: after this iteration the file holds i*bytes.length bytes
+      final FSDataOutputStream out = fs.append(p);
+      out.write(bytes);
+      out.close();
+      Assert.assertEquals(i*bytes.length, fs.getFileStatus(p).getLen());
+    }
+  }
 }

+ 7 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestHDFSFileSystemContract.java

@@ -18,8 +18,11 @@
 
 package org.apache.hadoop.hdfs;
 
+import java.io.IOException;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
+import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.security.UserGroupInformation;
 
 public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
@@ -46,5 +49,8 @@ public class TestHDFSFileSystemContract extends FileSystemContractBaseTest {
   protected String getDefaultWorkingDirectory() {
     return defaultWorkingDirectory;
   }
-  
+
+  public void testAppend() throws IOException {
+    AppendTestUtil.testAppend(fs, new Path("/testAppend/f"));
+  }
 }

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestWebHdfsFileSystemContract.java

@@ -37,6 +37,7 @@ import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystemContractBaseTest;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.hdfs.AppendTestUtil;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
 import org.apache.hadoop.hdfs.web.resources.DoAsParam;
@@ -333,5 +334,9 @@ public class TestWebHdfsFileSystemContract extends FileSystemContractBaseTest {
       assertEquals((short)0755, webhdfs.getFileStatus(dir).getPermission().toShort());
       conn.disconnect();
     }
+
+    {//test append.
+      AppendTestUtil.testAppend(fs, new Path(dir, "append"));
+    }
   }
 }