
Merging changes r1036749:r1038001 (excluding r1036767) from trunk to the federation branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/hdfs/branches/HDFS-1052@1078887 13f79535-47bb-0310-9956-ffa450edef68
Author: Suresh Srinivas
Commit: ab67d98605

+ 11 - 5
CHANGES.txt

@@ -307,7 +307,6 @@ Release 0.22.0 - Unreleased
 
     HDFS-1096. fix for prev. commit. (boryas)
 
-
     HDFS-1096. allow dfsadmin/mradmin refresh of superuser proxy group
      mappings (boryas)
 
@@ -457,14 +456,15 @@ Release 0.22.0 - Unreleased
     HDFS-1500. TestOfflineImageViewer failing on trunk. (Todd Lipcon
     via hairong)
 
-    HDFS-1467. Append pipeline construction not succeeds with more than
-    one replica. (Todd Lipcon via hairong)
-
     HDFS-1483. DFSClient.getBlockLocations should indicate if corresponding
     blocks are corrupt. (Patrick Kling via hairong)
 
     HDFS-259. Remove intentionally corrupt 0.13 directory layout creation.
-    (Todd Lipcon via eli).
+    (Todd Lipcon via eli)
+
+    HDFS-1513. Fix a number of warnings. (eli)
+
+    HDFS-1481. NameNode should validate fsimage before rolling. (hairong)
 
   OPTIMIZATIONS
 
@@ -633,6 +633,12 @@ Release 0.22.0 - Unreleased
     HDFS-1487. FSDirectory.removeBlock() should update diskspace count 
     of the block owner node (Zhong Wang via eli).
 
+    HDFS-1467. Append pipeline never succeeds with more than one replica.
+    (Todd Lipcon via eli)
+
+    HDFS-1167. New property for local conf directory in system-test-hdfs.xml
+    file. (Vinay Thota via cos)
+
 Release 0.21.1 - Unreleased
 
     HDFS-1411. Correct backup node startup command in hdfs user guide.

+ 1 - 1
src/java/org/apache/hadoop/fs/Hdfs.java

@@ -319,7 +319,7 @@ public class Hdfs extends AbstractFileSystem {
   @Override
   public void renameInternal(Path src, Path dst) 
     throws IOException, UnresolvedLinkException {
-    dfs.rename(getUriPath(src), getUriPath(dst));
+    dfs.rename(getUriPath(src), getUriPath(dst), Options.Rename.NONE);
   }
 
   @Override
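
The switch to the three-argument rename mirrors the FileContext-style API, where an Options.Rename flag makes the overwrite behaviour explicit. A minimal sketch of that API, not part of this patch and with hypothetical paths:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Options;
import org.apache.hadoop.fs.Path;

public class RenameSketch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // Rename.NONE fails if the destination already exists;
    // Rename.OVERWRITE would replace it instead.
    fc.rename(new Path("/tmp/src"), new Path("/tmp/dst"), Options.Rename.NONE);
  }
}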

+ 1 - 1
src/java/org/apache/hadoop/hdfs/DistributedFileSystem.java

@@ -183,7 +183,7 @@ public class DistributedFileSystem extends FileSystem {
   /** {@inheritDoc} */
   @Override
   public Path getHomeDirectory() {
-    return new Path("/user/" + dfs.ugi.getShortUserName()).makeQualified(this);
+    return makeQualified(new Path("/user/" + dfs.ugi.getShortUserName()));
   }
 
   private String getPathName(Path file) {
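
This change, and the similar ones in HftpFileSystem and the tests further down, move from the deprecated Path.makeQualified(FileSystem) to the FileSystem-side helper. A minimal sketch of the two surviving forms, assuming a configured default filesystem and a made-up path:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class MakeQualifiedSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    Path p = new Path("/user/alice");              // no scheme or authority yet
    Path q1 = fs.makeQualified(p);                 // e.g. hdfs://nn:8020/user/alice
    Path q2 = p.makeQualified(fs.getUri(), fs.getWorkingDirectory());  // equivalent
    System.out.println(q1 + " " + q2);
  }
}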

+ 4 - 4
src/java/org/apache/hadoop/hdfs/HftpFileSystem.java

@@ -353,13 +353,13 @@ public class HftpFileSystem extends FileSystem {
               Long.valueOf(attrs.getValue("blocksize")).longValue(),
               modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
               attrs.getValue("owner"), attrs.getValue("group"),
-              new Path(getUri().toString(), attrs.getValue("path"))
-                .makeQualified(HftpFileSystem.this))
+              HftpFileSystem.this.makeQualified(
+                  new Path(getUri().toString(), attrs.getValue("path"))))
         : new FileStatus(0L, true, 0, 0L,
               modif, atime, FsPermission.valueOf(attrs.getValue("permission")),
               attrs.getValue("owner"), attrs.getValue("group"),
-              new Path(getUri().toString(), attrs.getValue("path"))
-                .makeQualified(HftpFileSystem.this));
+              HftpFileSystem.this.makeQualified(
+                  new Path(getUri().toString(), attrs.getValue("path"))));
       fslist.add(fs);
     }
 

+ 14 - 4
src/java/org/apache/hadoop/hdfs/server/datanode/BlockReceiver.java

@@ -33,7 +33,6 @@ import java.util.LinkedList;
 import java.util.zip.Checksum;
 
 import org.apache.commons.logging.Log;
-import org.apache.hadoop.fs.FSInputChecker;
 import org.apache.hadoop.fs.FSOutputSummer;
 import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
 import org.apache.hadoop.hdfs.protocol.ExtendedBlock;
@@ -90,7 +89,7 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
                 String clientName, DatanodeInfo srcDataNode, DataNode datanode)
                 throws IOException {
     try{
-      this.block = new ExtendedBlock(inBlock);
+      this.block = inBlock;
       this.in = in;
       this.inAddr = inAddr;
       this.myAddr = myAddr;
@@ -699,6 +698,17 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
     datanode.data.adjustCrcChannelPosition(block, streams, checksumSize);
   }
 
+  /**
+   * Convert a checksum byte array to a long
+   */
+  static private long checksum2long(byte[] checksum) {
+    long crc = 0L;
+    for(int i=0; i<checksum.length; i++) {
+      crc |= (0xffL&(long)checksum[i])<<((checksum.length-i-1)*8);
+    }
+    return crc;
+  }
+
   /**
    * reads in the partial crc chunk and computes checksum
    * of pre-existing data in partial chunk.
@@ -740,11 +750,11 @@ class BlockReceiver implements java.io.Closeable, FSConstants {
 
     // paranoia! verify that the pre-computed crc matches what we
     // recalculated just now
-    if (partialCrc.getValue() != FSInputChecker.checksum2long(crcbuf)) {
+    if (partialCrc.getValue() != checksum2long(crcbuf)) {
       String msg = "Partial CRC " + partialCrc.getValue() +
                    " does not match value computed the " +
                    " last time file was closed " +
-                   FSInputChecker.checksum2long(crcbuf);
+                   checksum2long(crcbuf);
       throw new IOException(msg);
     }
   }
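
The new private checksum2long helper replaces the call into FSInputChecker; it interprets the stored checksum bytes as a big-endian integer. A standalone illustration of the conversion, with a hypothetical demo class and input value:

public class Checksum2LongDemo {
  // Same logic as the private helper added above: big-endian bytes packed into a long.
  static long checksum2long(byte[] checksum) {
    long crc = 0L;
    for (int i = 0; i < checksum.length; i++) {
      crc |= (0xffL & (long) checksum[i]) << ((checksum.length - i - 1) * 8);
    }
    return crc;
  }

  public static void main(String[] args) {
    byte[] crc32Bytes = {0x12, 0x34, 0x56, 0x78};  // a stored 4-byte CRC value
    System.out.println(Long.toHexString(checksum2long(crc32Bytes)));  // prints 12345678
  }
}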

+ 0 - 1
src/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -242,7 +242,6 @@ class DataXceiver extends DataTransferProtocol.Receiver
     // forward the original version of the block to downstream mirrors, so
     // make a copy here.
     final ExtendedBlock originalBlock = new ExtendedBlock(block);
-
     block.setNumBytes(dataXceiverServer.estimateBlockSize);
     LOG.info("Receiving block " + block + 
              " src: " + remoteAddress +

+ 4 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/Checkpointer.java

@@ -187,7 +187,7 @@ class Checkpointer extends Daemon {
     File[] files = list.toArray(new File[list.size()]);
     assert files.length > 0 : "No checkpoint targets.";
     String nnHttpAddr = backupNode.nnHttpAddress;
-    TransferFsImage.getFileClient(nnHttpAddr, fileid, files);
+    TransferFsImage.getFileClient(nnHttpAddr, fileid, files, false);
     LOG.info("Downloaded file " + files[0].getName() + " size " +
              files[0].length() + " bytes.");
 
@@ -196,7 +196,7 @@ class Checkpointer extends Daemon {
     list = getFSImage().getFiles(NameNodeFile.EDITS, NameNodeDirType.EDITS);
     files = list.toArray(new File[list.size()]);
     assert files.length > 0 : "No checkpoint targets.";
-    TransferFsImage.getFileClient(nnHttpAddr, fileid, files);
+    TransferFsImage.getFileClient(nnHttpAddr, fileid, files, false);
     LOG.info("Downloaded file " + files[0].getName() + " size " +
         files[0].length() + " bytes.");
   }
@@ -213,7 +213,8 @@ class Checkpointer extends Daemon {
       "&token=" + sig.toString() +
       "&newChecksum=" + getFSImage().imageDigest.toString();
     LOG.info("Posted URL " + backupNode.nnHttpAddress + fileid);
-    TransferFsImage.getFileClient(backupNode.nnHttpAddress, fileid, (File[])null);
+    TransferFsImage.getFileClient(backupNode.nnHttpAddress, 
+        fileid, (File[])null, false);
   }
 
   /**

+ 12 - 5
src/java/org/apache/hadoop/hdfs/server/namenode/GetImageServlet.java

@@ -35,6 +35,7 @@ import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.hdfs.util.DataTransferThrottler;
+import org.apache.hadoop.io.MD5Hash;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 
@@ -91,14 +92,20 @@ public class GetImageServlet extends HttpServlet {
             // issue a HTTP get request to download the new fsimage 
             nnImage.validateCheckpointUpload(ff.getToken());
             nnImage.newImageDigest = ff.getNewChecksum();
-            reloginIfNecessary().doAs(new PrivilegedExceptionAction<Void>() {
+            MD5Hash downloadImageDigest = reloginIfNecessary().doAs(
+                new PrivilegedExceptionAction<MD5Hash>() {
                 @Override
-                public Void run() throws Exception {
-                  TransferFsImage.getFileClient(ff.getInfoServer(), "getimage=1", 
-                      nnImage.getFsImageNameCheckpoint());
-                  return null;
+                public MD5Hash run() throws Exception {
+                  return TransferFsImage.getFileClient(
+                      ff.getInfoServer(), "getimage=1", 
+                      nnImage.getFsImageNameCheckpoint(), true);
                 }
             });
+            if (!nnImage.newImageDigest.equals(downloadImageDigest)) {
+              throw new IOException("The downloaded image is corrupt," +
+                  " expecting a checksum " + nnImage.newImageDigest +
+                  " but received a checksum " + downloadImageDigest);
+            }
            nnImage.checkpointUploadDone();
           }
           return null;
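
Together with the TransferFsImage change below, this appears to implement the HDFS-1481 entry added in CHANGES.txt: the digest computed while downloading must match the checksum the uploader advertised, otherwise the checkpoint is rejected. A hedged sketch of that comparison in isolation, with a made-up demo class and input bytes:

import java.io.IOException;
import org.apache.hadoop.io.MD5Hash;

public class ImageDigestCheckDemo {
  static void verify(MD5Hash expected, MD5Hash received) throws IOException {
    if (!expected.equals(received)) {
      throw new IOException("The downloaded image is corrupt, expecting a checksum "
          + expected + " but received a checksum " + received);
    }
  }

  public static void main(String[] args) throws IOException {
    byte[] imageBytes = "fsimage contents".getBytes();
    MD5Hash advertised = MD5Hash.digest(imageBytes);  // what the uploader claimed
    MD5Hash computed = MD5Hash.digest(imageBytes);    // what arrived over the wire
    verify(advertised, computed);                     // a mismatch would throw
  }
}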

+ 3 - 3
src/java/org/apache/hadoop/hdfs/server/namenode/SecondaryNameNode.java

@@ -343,7 +343,7 @@ public class SecondaryNameNode implements Runnable {
                 NameNodeDirType.IMAGE);
             File[] srcNames = list.toArray(new File[list.size()]);
             assert srcNames.length > 0 : "No checkpoint targets.";
-            TransferFsImage.getFileClient(fsName, fileid, srcNames);
+            TransferFsImage.getFileClient(fsName, fileid, srcNames, false);
             LOG.info("Downloaded file " + srcNames[0].getName() + " size " +
                      srcNames[0].length() + " bytes.");
         
@@ -352,7 +352,7 @@ public class SecondaryNameNode implements Runnable {
             list = getFSImage().getFiles(NameNodeFile.EDITS, NameNodeDirType.EDITS);
             srcNames = list.toArray(new File[list.size()]);;
             assert srcNames.length > 0 : "No checkpoint targets.";
-            TransferFsImage.getFileClient(fsName, fileid, srcNames);
+            TransferFsImage.getFileClient(fsName, fileid, srcNames, false);
             LOG.info("Downloaded file " + srcNames[0].getName() + " size " +
                 srcNames[0].length() + " bytes.");
         
@@ -378,7 +378,7 @@ public class SecondaryNameNode implements Runnable {
       "&token=" + sig.toString() +
       "&newChecksum=" + checkpointImage.imageDigest;
     LOG.info("Posted URL " + fsName + fileid);
-    TransferFsImage.getFileClient(fsName, fileid, (File[])null);
+    TransferFsImage.getFileClient(fsName, fileid, (File[])null, false);
   }
 
   /**

+ 12 - 1
src/java/org/apache/hadoop/hdfs/server/namenode/TransferFsImage.java

@@ -19,6 +19,8 @@ package org.apache.hadoop.hdfs.server.namenode;
 
 import java.io.*;
 import java.net.*;
+import java.security.DigestInputStream;
+import java.security.MessageDigest;
 import java.util.Iterator;
 import java.util.Map;
 import java.lang.Math;
@@ -169,8 +171,11 @@ class TransferFsImage implements FSConstants {
   /**
    * Client-side Method to fetch file from a server
    * Copies the response from the URL to a list of local files.
+   * 
+   * @Return a digest of the received file if getChecksum is true
    */
-  static void getFileClient(String fsName, String id, File[] localPath)
+  static MD5Hash getFileClient(String fsName, String id, File[] localPath,
+      boolean getChecksum)
     throws IOException {
     byte[] buf = new byte[BUFFER_SIZE];
     String proto = UserGroupInformation.isSecurityEnabled() ? "https://" : "http://";
@@ -195,6 +200,11 @@ class TransferFsImage implements FSConstants {
     }
     long received = 0;
     InputStream stream = connection.getInputStream();
+    MessageDigest digester = null;
+    if (getChecksum) {
+      digester = MD5Hash.getDigester();
+      stream = new DigestInputStream(stream, digester);
+    }
     FileOutputStream[] output = null;
 
     try {
@@ -230,5 +240,6 @@ class TransferFsImage implements FSConstants {
                               advertisedSize);
       }
     }
+    return digester==null ? null : new MD5Hash(digester.digest());
   }
 }
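
The digest is computed on the fly by wrapping the HTTP input stream in a java.security.DigestInputStream, so no second pass over the file is needed. A self-contained sketch of that pattern, where the byte source stands in for the HTTP connection:

import java.io.ByteArrayInputStream;
import java.io.InputStream;
import java.security.DigestInputStream;
import java.security.MessageDigest;

public class DigestWhileCopyingDemo {
  public static void main(String[] args) throws Exception {
    MessageDigest digester = MessageDigest.getInstance("MD5");
    InputStream stream = new DigestInputStream(
        new ByteArrayInputStream("fsimage bytes".getBytes("UTF-8")), digester);
    byte[] buf = new byte[4096];
    while (stream.read(buf) >= 0) {
      // getFileClient would write buf to each local fsimage copy here
    }
    byte[] md5 = digester.digest();  // finalized once the copy completes
    StringBuilder hex = new StringBuilder();
    for (byte b : md5) {
      hex.append(String.format("%02x", b));
    }
    System.out.println(hex);
  }
}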

+ 6 - 4
src/test/hdfs/org/apache/hadoop/fs/TestGlobPaths.java

@@ -394,7 +394,7 @@ public class TestGlobPaths extends TestCase {
   private Path[] prepareTesting(String pattern, String[] files)
     throws IOException {
     for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
-      path[i] = new Path(files[i]).makeQualified(fs);
+      path[i] = fs.makeQualified(new Path(files[i]));
       if (!fs.mkdirs(path[i])) {
         throw new IOException("Mkdirs failed to create " + path[i].toString());
       }
@@ -403,7 +403,8 @@ public class TestGlobPaths extends TestCase {
     Path[] globResults = FileUtil.stat2Paths(fs.globStatus(patternPath),
                                              patternPath);
     for(int i=0; i<globResults.length; i++) {
-      globResults[i] = globResults[i].makeQualified(fs);
+      globResults[i] = 
+        globResults[i].makeQualified(fs.getUri(), fs.getWorkingDirectory());
     }
     return globResults;
   }
@@ -411,7 +412,7 @@ public class TestGlobPaths extends TestCase {
   private Path[] prepareTesting(String pattern, String[] files,
       PathFilter filter) throws IOException {
     for(int i=0; i<Math.min(NUM_OF_PATHS, files.length); i++) {
-      path[i] = new Path(files[i]).makeQualified(fs);
+      path[i] = fs.makeQualified(new Path(files[i]));
       if (!fs.mkdirs(path[i])) {
         throw new IOException("Mkdirs failed to create " + path[i].toString());
       }
@@ -420,7 +421,8 @@ public class TestGlobPaths extends TestCase {
     Path[] globResults = FileUtil.stat2Paths(fs.globStatus(patternPath, filter),
                                              patternPath);
     for(int i=0; i<globResults.length; i++) {
-      globResults[i] = globResults[i].makeQualified(fs);
+      globResults[i] = 
+        globResults[i].makeQualified(fs.getUri(), fs.getWorkingDirectory());
     }
     return globResults;
   }

+ 2 - 2
src/test/hdfs/org/apache/hadoop/hdfs/TestLocalDFS.java

@@ -82,8 +82,8 @@ public class TestLocalDFS extends TestCase {
                                     file1.toString()));
 
       // test home directory
-      Path home = new Path("/user/" + getUserName(fileSys))
-        .makeQualified(fileSys);
+      Path home = 
+        fileSys.makeQualified(new Path("/user/" + getUserName(fileSys))); 
       Path fsHome = fileSys.getHomeDirectory();
       assertEquals(home, fsHome);
 

+ 1 - 1
src/test/hdfs/org/apache/hadoop/hdfs/TestModTime.java

@@ -124,7 +124,7 @@ public class TestModTime extends TestCase {
      //
      // create another directory
      //
-     Path dir2 = (new Path("testdir2/")).makeQualified(fileSys);
+     Path dir2 = fileSys.makeQualified(new Path("testdir2/"));
      System.out.println("Creating testdir2 " + dir2);
      assertTrue(fileSys.mkdirs(dir2));
      stat = fileSys.getFileStatus(dir2);

+ 3 - 3
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestBlockTokenWithDFS.java

@@ -201,7 +201,7 @@ public class TestBlockTokenWithDFS extends TestCase {
       stm = fs.append(fileToAppend);
       int mid = rawData.length - 1;
       stm.write(rawData, 1, mid - 1);
-      stm.sync();
+      stm.hflush();
 
       /*
        * wait till token used in stm expires
@@ -253,7 +253,7 @@ public class TestBlockTokenWithDFS extends TestCase {
       // write a partial block
       int mid = rawData.length - 1;
       stm.write(rawData, 0, mid);
-      stm.sync();
+      stm.hflush();
 
       /*
        * wait till token used in stm expires
@@ -315,7 +315,7 @@ public class TestBlockTokenWithDFS extends TestCase {
        * testing READ interface on DN using a BlockReader
        */
 
-      DFSClient dfsclient = new DFSClient(new InetSocketAddress("localhost",
+      new DFSClient(new InetSocketAddress("localhost",
           cluster.getNameNodePort()), conf);
       List<LocatedBlock> locatedBlocks = cluster.getNameNode().getBlockLocations(
           FILE_TO_READ, 0, FILE_SIZE).getLocatedBlocks();
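
These tests drop the deprecated sync() in favour of hflush(), which pushes buffered data to every datanode in the write pipeline without closing the file; the unused DFSClient variable is also no longer assigned, silencing a warning. A minimal sketch of the hflush() idiom, with an arbitrary path and buffer size:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class HflushSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.get(new Configuration());
    FSDataOutputStream out = fs.create(new Path("/tmp/partial-block"));
    out.write(new byte[512]);  // write only part of a block
    out.hflush();              // new readers can now see these bytes; the file stays open
    out.close();
  }
}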

+ 4 - 0
src/test/hdfs/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java

@@ -75,6 +75,7 @@ class MockHttpServletResponse implements HttpServletResponse {
     status = sc;
   }
   
+  @Deprecated
   public void setStatus(int sc, java.lang.String sm) {
   }
   
@@ -105,10 +106,12 @@ class MockHttpServletResponse implements HttpServletResponse {
   public void sendError(int a, java.lang.String b) {
   }
   
+  @Deprecated
   public String encodeRedirectUrl(java.lang.String a) {
     return null;
   }
   
+  @Deprecated
   public String encodeUrl(java.lang.String url) {
     return null;
   }
@@ -117,6 +120,7 @@ class MockHttpServletResponse implements HttpServletResponse {
     return null;
   }
   
+  @Deprecated
   public String encodeURL(java.lang.String url) {
     return null;
   }

+ 8 - 0
src/test/system/conf/system-test-hdfs.xml

@@ -87,6 +87,14 @@
   Command for resuming the given suspended process.
   </description>
 </property>
+<property>
+  <name>test.system.hdrc.hadoop.local.confdir</name>
+  <value>$(TO_DO_GLOBAL_TMP_DIR)/localconf</value>
+  <description>
+    A local directory where a new config file is placed before
+    being pushed into new config location on the cluster.
+  </description>
+</property>
 
 <!-- Mandatory keys to be set for the multi user support to be enabled.  -->