HDFS-5353. Merge change r1548987 from trunk.

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-2@1548988 13f79535-47bb-0310-9956-ffa450edef68
Jing Zhao · 11 years ago · parent commit 6026982bcb

+ 3 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -358,6 +358,9 @@ Release 2.3.0 - UNRELEASED
     HDFS-5590. Block ID and generation stamp may be reused when persistBlocks is 
     set to false. (jing9)
 
+    HDFS-5353. Short circuit reads fail when dfs.encrypt.data.transfer is 
+    enabled. (Colin Patrick McCabe via jing9)
+
Release 2.2.0 - 2013-10-13
 
  INCOMPATIBLE CHANGES

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/BasicInetPeer.java

@@ -125,4 +125,9 @@ class BasicInetPeer implements Peer {
  public DomainSocket getDomainSocket() {
    return null;
  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    return false;
+  }
 }

+ 15 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/DomainPeer.java

@@ -114,4 +114,19 @@ public class DomainPeer implements Peer {
  public DomainSocket getDomainSocket() {
    return socket;
  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    //
+    // Communication over domain sockets is assumed to be secure, since it
+    // doesn't pass over any network.  We also carefully control the privileges
+    // that can be used on the domain socket inode and its parent directories.
+    // See #{java.org.apache.hadoop.net.unix.DomainSocket#validateSocketPathSecurity0}
+    // for details.
+    //
+    // So unless you are running as root or the hdfs superuser, you cannot
+    // launch a man-in-the-middle attack on UNIX domain socket traffic.
+    //
+    return true;
+  }
 }
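
The security check referenced in this comment is a native method on org.apache.hadoop.net.unix.DomainSocket. As a rough user-space illustration of the idea, and only that (the real implementation also validates the ownership of each path component), a sketch might walk the socket path and reject any ancestor directory that group or other can write to, since such a directory would let another local user swap out the socket:

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.nio.file.attribute.PosixFilePermission;
    import java.util.Set;

    public class SocketPathCheck {
      // Walk from the socket's parent directory up to the filesystem root.
      // A group- or world-writable component would allow another local user
      // to rename or replace the socket and intercept traffic.
      static void validatePath(String socketPath) throws IOException {
        for (Path dir = Paths.get(socketPath).toAbsolutePath().getParent();
             dir != null; dir = dir.getParent()) {
          Set<PosixFilePermission> perms = Files.getPosixFilePermissions(dir);
          if (perms.contains(PosixFilePermission.GROUP_WRITE) ||
              perms.contains(PosixFilePermission.OTHERS_WRITE)) {
            throw new IOException("insecure path component: " + dir);
          }
        }
      }

      public static void main(String[] args) throws IOException {
        validatePath("/var/run/hdfs-sockets/dn.sock");  // hypothetical path
      }
    }

Like the real check, this simplified version would reject a socket placed under /tmp, which is world-writable.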

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/EncryptedPeer.java

@@ -139,4 +139,9 @@ public class EncryptedPeer implements Peer {
  public DomainSocket getDomainSocket() {
    return enclosedPeer.getDomainSocket();
  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    return true;
+  }
 }

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/NioInetPeer.java

@@ -128,4 +128,9 @@ class NioInetPeer implements Peer {
  public DomainSocket getDomainSocket() {
    return null;
  }
+
+  @Override
+  public boolean hasSecureChannel() {
+    return false;
+  }
 }

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/net/Peer.java

@@ -112,4 +112,12 @@ public interface Peer extends Closeable {
   *                       peer, or null if there is none.
   */
  public DomainSocket getDomainSocket();
+  
+  /**
+   * Return true if the channel is secure.
+   *
+   * @return               True if our channel to this peer is not
+   *                       susceptible to man-in-the-middle attacks.
+   */
+  public boolean hasSecureChannel();
 }
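
Across the four implementations touched above, the new method spells out a simple contract: only transports that inherently resist man-in-the-middle attacks answer true. A toy, standalone summary of the values this patch assigns (illustrative only):

    import java.util.LinkedHashMap;
    import java.util.Map;

    public class SecureChannelContract {
      public static void main(String[] args) {
        // hasSecureChannel() as implemented by each Peer in this patch.
        Map<String, Boolean> peers = new LinkedHashMap<>();
        peers.put("BasicInetPeer", false);  // plain blocking TCP socket
        peers.put("NioInetPeer",   false);  // plain non-blocking TCP socket
        peers.put("DomainPeer",    true);   // UNIX domain socket, host-local only
        peers.put("EncryptedPeer", true);   // streams already wrapped in encryption
        peers.forEach((name, secure) ->
            System.out.println(name + ".hasSecureChannel() -> " + secure));
      }
    }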

+ 1 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataXceiver.java

@@ -162,7 +162,7 @@ class DataXceiver extends Receiver implements Runnable {
     try {
       peer.setWriteTimeout(datanode.getDnConf().socketWriteTimeout);
       InputStream input = socketIn;
-      if (dnConf.encryptDataTransfer) {
+      if ((!peer.hasSecureChannel()) && dnConf.encryptDataTransfer) {
         IOStreamPair encryptedStreams = null;
         try {
           encryptedStreams = DataTransferEncryptor.getEncryptedStreams(socketOut,
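
The one-line condition change above is the substantive fix: the DataNode now negotiates encrypted streams only when the peer's channel is not already secure, so domain-socket (short-circuit) peers skip the handshake that previously failed. A simplified, self-contained sketch of the resulting control flow; negotiate() here is a hypothetical stand-in for DataTransferEncryptor.getEncryptedStreams():

    import java.io.InputStream;

    public class StreamSetup {
      // Minimal stand-in for the Peer interface added in this patch.
      interface Peer { boolean hasSecureChannel(); }

      static InputStream setUpInput(Peer peer, boolean encryptDataTransfer,
                                    InputStream socketIn) {
        // Before HDFS-5353 the condition was just `encryptDataTransfer`, which
        // forced a handshake even on domain-socket peers, breaking
        // short-circuit reads. A peer whose channel is already secure now
        // skips encryption negotiation entirely.
        if (!peer.hasSecureChannel() && encryptDataTransfer) {
          return negotiate(socketIn);
        }
        return socketIn;
      }

      static InputStream negotiate(InputStream in) {
        return in;  // placeholder for the real key exchange and cipher setup
      }
    }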

+ 4 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestParallelShortCircuitReadUnCached.java

@@ -42,6 +42,10 @@ public class TestParallelShortCircuitReadUnCached extends TestParallelReadUtil {
       new File(sockDir.getDir(), 
         "TestParallelShortCircuitReadUnCached._PORT.sock").getAbsolutePath());
     conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
+    // Enabling data transfer encryption should have no effect when using
+    // short-circuit local reads.  This is a regression test for HDFS-5353.
+    conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
+    conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
     conf.setBoolean(DFSConfigKeys.
         DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
     conf.setBoolean(DFSConfigKeys.
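
The two added settings turn on wire encryption for a test that reads over domain sockets; dfs.block.access.token.enable comes along because data transfer encryption requires block access tokens. Pulled out of the test, the combination being exercised is (a standalone sketch using the same DFSConfigKeys constants):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.DFSConfigKeys;

    public class Hdfs5353RegressionConf {
      public static Configuration build() {
        Configuration conf = new Configuration();
        // Short-circuit local reads, served over a UNIX domain socket.
        conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
        // Wire encryption; must not interfere with the local read path.
        conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
        // Required whenever data transfer encryption is enabled.
        conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
        return conf;
      }
    }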

+ 5 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestPeerCache.java

@@ -140,6 +140,11 @@ public class TestPeerCache {
     public int hashCode() {
       return dnId.hashCode() ^ (hasDomain ? 1 : 0);
     }
+
+    @Override
+    public boolean hasSecureChannel() {
+      return false;
+    }
   }
 
   @Test