
HADOOP-5212. Fix the path translation problem introduced by HADOOP-4868
running on cygwin. (Sharad Agarwal via omalley)


git-svn-id: https://svn.apache.org/repos/asf/hadoop/core/trunk@743035 13f79535-47bb-0310-9956-ffa450edef68

Owen O'Malley 16 years ago
Parent
Current commit
2ad323fcbc
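Note: the fix moves the Cygwin CLASSPATH translation out of bin/hadoop-config.sh and into bin/hadoop, bin/hdfs and bin/mapred, so that cygpath runs only after each command script has finished appending its own jars to CLASSPATH. A minimal sketch of what cygpath -p -w does to a colon-separated path list (the paths below are illustrative, not taken from the commit):

    # -p treats the argument as a PATH-style list, -w emits the Windows form
    CLASSPATH=/usr/local/hadoop/conf:/usr/local/hadoop/hadoop-core.jar
    if $cygwin; then
      CLASSPATH=`cygpath -p -w "$CLASSPATH"`
      # e.g. C:\cygwin\usr\local\hadoop\conf;C:\cygwin\usr\local\hadoop\hadoop-core.jar
    fi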

+ 5 - 2
CHANGES.txt

@@ -92,8 +92,8 @@ Trunk (unreleased changes)
     tools, and example jars. Let findbugs depend on this rather than the 'tar'
     target. (Giridharan Kesavan via cdouglas)
 
-    HADOOP-4868. Splits the hadoop script into three parts - bin/hadoop, bin/mapred and 
-    bin/hdfs. (Sharad Agarwal via ddas)
+    HADOOP-4868. Splits the hadoop script into three parts - bin/hadoop, 
+    bin/mapred and bin/hdfs. (Sharad Agarwal via ddas)
 
   OPTIMIZATIONS
 
@@ -151,6 +151,9 @@ Trunk (unreleased changes)
     task-controller be an independent target in build.xml.
     (Sreekanth Ramakrishnan via yhemanth)
 
+    HADOOP-5212. Fix the path translation problem introduced by HADOOP-4868 
+    running on cygwin. (Sharad Agarwal via omalley)
+    
 Release 0.20.0 - Unreleased
 
   INCOMPATIBLE CHANGES

+ 4 - 0
bin/hadoop

@@ -98,6 +98,10 @@ case $COMMAND in
       CLASS=$COMMAND
     fi
     shift
+    
+    if $cygwin; then
+      CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+    fi
     exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"
     ;;
 

+ 2 - 3
bin/hadoop-config.sh

@@ -39,7 +39,7 @@ this="$bin/$script"
 # the root of the Hadoop installation
 #TODO: change the env variable when dir structure is changed
 export HADOOP_HOME=`dirname "$this"`/..
-export HADOOP_CORE_HOME="${HADOOP_CORE_HOME:-$HADOOP_HOME}"
+export HADOOP_CORE_HOME="${HADOOP_HOME}"
 #export HADOOP_HOME=`dirname "$this"`/../..
 #export HADOOP_CORE_HOME="${HADOOP_CORE_HOME:-`dirname "$this"`/..}"
 
@@ -56,7 +56,7 @@ then
 fi
  
 # Allow alternate conf dir location.
-HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
+export HADOOP_CONF_DIR="${HADOOP_CONF_DIR:-$HADOOP_HOME/conf}"
 
 #check to see it is specified whether to use the slaves or the
 # masters file
@@ -175,7 +175,6 @@ unset IFS
 
 # cygwin path translation
 if $cygwin; then
-  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
   HADOOP_CORE_HOME=`cygpath -w "$HADOOP_CORE_HOME"`
   HADOOP_LOG_DIR=`cygpath -w "$HADOOP_LOG_DIR"`
   TOOL_PATH=`cygpath -p -w "$TOOL_PATH"`

+ 4 - 0
bin/hdfs

@@ -96,4 +96,8 @@ for f in $HADOOP_HDFS_HOME/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
+if $cygwin; then
+  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+fi
+
 exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"

+ 4 - 0
bin/mapred

@@ -93,4 +93,8 @@ for f in $HADOOP_MAPRED_HOME/lib/*.jar; do
   CLASSPATH=${CLASSPATH}:$f;
 done
 
+if $cygwin; then
+  CLASSPATH=`cygpath -p -w "$CLASSPATH"`
+fi
+
 exec "$JAVA" $JAVA_HEAP_MAX $HADOOP_OPTS -classpath "$CLASSPATH" $CLASS "$@"

+ 1 - 1
src/core/org/apache/hadoop/fs/s3/S3InputStream.java

@@ -168,7 +168,7 @@ class S3InputStream extends FSInputStream {
   @Override
   public void close() throws IOException {
     if (closed) {
-      throw new IOException("Stream closed");
+      return;
     }
     if (blockStream != null) {
       blockStream.close();

+ 1 - 1
src/core/org/apache/hadoop/fs/s3/S3OutputStream.java

@@ -200,7 +200,7 @@ class S3OutputStream extends OutputStream {
   @Override
   public synchronized void close() throws IOException {
     if (closed) {
-      throw new IOException("Stream closed");
+      return;
     }
 
     flush();

+ 8 - 1
src/core/org/apache/hadoop/fs/s3native/NativeS3FileSystem.java

@@ -85,6 +85,7 @@ public class NativeS3FileSystem extends FileSystem {
       this.key = key;
     }
     
+    @Override
     public synchronized int read() throws IOException {
       int result = in.read();
       if (result != -1) {
@@ -92,6 +93,7 @@ public class NativeS3FileSystem extends FileSystem {
       }
       return result;
     }
+    @Override
     public synchronized int read(byte[] b, int off, int len)
       throws IOException {
       
@@ -102,18 +104,22 @@ public class NativeS3FileSystem extends FileSystem {
       return result;
     }
 
+    @Override
     public void close() throws IOException {
       in.close();
     }
 
+    @Override
     public synchronized void seek(long pos) throws IOException {
       in.close();
       in = store.retrieve(key, pos);
       this.pos = pos;
     }
+    @Override
     public synchronized long getPos() throws IOException {
       return pos;
     }
+    @Override
     public boolean seekToNewSource(long targetPos) throws IOException {
       return false;
     }
@@ -164,7 +170,7 @@ public class NativeS3FileSystem extends FileSystem {
     @Override
     public synchronized void close() throws IOException {
       if (closed) {
-        throw new IOException("Stream closed");
+        return;
       }
 
       backupStream.close();
@@ -260,6 +266,7 @@ public class NativeS3FileSystem extends FileSystem {
   }
 
   /** This optional operation is not yet supported. */
+  @Override
   public FSDataOutputStream append(Path f, int bufferSize,
       Progressable progress) throws IOException {
     throw new IOException("Not supported");

+ 79 - 31
src/hdfs/org/apache/hadoop/hdfs/DFSClient.java

@@ -17,39 +17,85 @@
  */
 package org.apache.hadoop.hdfs;
 
-import org.apache.hadoop.io.*;
-import org.apache.hadoop.io.retry.RetryPolicies;
-import org.apache.hadoop.io.retry.RetryPolicy;
-import org.apache.hadoop.io.retry.RetryProxy;
-import org.apache.hadoop.fs.*;
+import java.io.BufferedInputStream;
+import java.io.BufferedOutputStream;
+import java.io.DataInputStream;
+import java.io.DataOutputStream;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.io.OutputStream;
+import java.net.InetSocketAddress;
+import java.net.Socket;
+import java.nio.BufferOverflowException;
+import java.nio.ByteBuffer;
+import java.util.AbstractMap;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.Collections;
+import java.util.HashMap;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import java.util.Random;
+import java.util.SortedMap;
+import java.util.TreeMap;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.concurrent.TimeUnit;
+import java.util.zip.CRC32;
+
+import javax.net.SocketFactory;
+import javax.security.auth.login.LoginException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.BlockLocation;
+import org.apache.hadoop.fs.ChecksumException;
+import org.apache.hadoop.fs.ContentSummary;
+import org.apache.hadoop.fs.FSDataInputStream;
+import org.apache.hadoop.fs.FSInputChecker;
+import org.apache.hadoop.fs.FSInputStream;
+import org.apache.hadoop.fs.FSOutputSummer;
+import org.apache.hadoop.fs.FileStatus;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.MD5MD5CRC32FileChecksum;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.fs.Syncable;
 import org.apache.hadoop.fs.permission.FsPermission;
-import org.apache.hadoop.ipc.*;
-import org.apache.hadoop.net.NetUtils;
-import org.apache.hadoop.net.NodeBase;
-import org.apache.hadoop.conf.*;
-import org.apache.hadoop.hdfs.protocol.*;
+import org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException;
+import org.apache.hadoop.hdfs.protocol.Block;
+import org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol;
+import org.apache.hadoop.hdfs.protocol.ClientProtocol;
+import org.apache.hadoop.hdfs.protocol.DataTransferProtocol;
+import org.apache.hadoop.hdfs.protocol.DatanodeID;
+import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
+import org.apache.hadoop.hdfs.protocol.FSConstants;
+import org.apache.hadoop.hdfs.protocol.LocatedBlock;
+import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
+import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
 import org.apache.hadoop.hdfs.server.common.HdfsConstants;
 import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
 import org.apache.hadoop.hdfs.server.datanode.DataNode;
 import org.apache.hadoop.hdfs.server.namenode.NameNode;
 import org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException;
+import org.apache.hadoop.io.DataOutputBuffer;
+import org.apache.hadoop.io.IOUtils;
+import org.apache.hadoop.io.MD5Hash;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.retry.RetryPolicies;
+import org.apache.hadoop.io.retry.RetryPolicy;
+import org.apache.hadoop.io.retry.RetryProxy;
+import org.apache.hadoop.ipc.RPC;
+import org.apache.hadoop.ipc.RemoteException;
+import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.net.NodeBase;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.UnixUserGroupInformation;
-import org.apache.hadoop.util.*;
-
-import org.apache.commons.logging.*;
-
-import java.io.*;
-import java.net.*;
-import java.util.*;
-import java.util.zip.CRC32;
-import java.util.concurrent.TimeUnit;
-import java.util.concurrent.ConcurrentHashMap;
-import java.nio.BufferOverflowException;
-import java.nio.ByteBuffer;
-
-import javax.net.SocketFactory;
-import javax.security.auth.login.LoginException;
+import org.apache.hadoop.util.Daemon;
+import org.apache.hadoop.util.DataChecksum;
+import org.apache.hadoop.util.Progressable;
+import org.apache.hadoop.util.StringUtils;
 
 /********************************************************
  * DFSClient can connect to a Hadoop Filesystem and 
@@ -951,6 +997,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     }
 
     /** {@inheritDoc} */
+    @Override
     public String toString() {
       String s = getClass().getSimpleName();
       if (LOG.isTraceEnabled()) {
@@ -1528,7 +1575,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     public synchronized void close() throws IOException {
       checkOpen();
       if (closed) {
-        throw new IOException("Stream closed");
+        return;
       }
 
       if ( blockReader != null ) {
@@ -2096,6 +2143,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
 
       private volatile boolean closed = false;
   
+      @Override
       public void run() {
 
         while (!closed && clientRunning) {
@@ -2264,6 +2312,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
         this.targets = targets;
       }
 
+      @Override
       public void run() {
 
         this.setName("ResponseProcessor for block " + block);
@@ -2483,12 +2532,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
     }
 
     private void isClosed() throws IOException {
-      if (closed) {
-        if (lastException != null) {
+      if (closed && lastException != null) {
           throw lastException;
-        } else {
-          throw new IOException("Stream closed.");
-        }
       }
     }
 
@@ -3010,6 +3055,8 @@ public class DFSClient implements FSConstants, java.io.Closeable {
      */
     @Override
     public void close() throws IOException {
+      if(closed)
+        return;
       closeInternal();
       leasechecker.remove(src);
       
@@ -3143,6 +3190,7 @@ public class DFSClient implements FSConstants, java.io.Closeable {
   }
 
   /** {@inheritDoc} */
+  @Override
   public String toString() {
     return getClass().getSimpleName() + "[clientName=" + clientName
         + ", ugi=" + ugi + "]"; 

+ 20 - 6
src/test/org/apache/hadoop/fs/FileSystemContractBaseTest.java

@@ -23,12 +23,6 @@ import java.io.IOException;
 
 import junit.framework.TestCase;
 
-import org.apache.hadoop.fs.FSDataInputStream;
-import org.apache.hadoop.fs.FSDataOutputStream;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileSystem;
-import org.apache.hadoop.fs.Path;
-
 /**
  * <p>
  * A collection of tests for the contract of the {@link FileSystem}.
@@ -432,6 +426,26 @@ public abstract class FileSystemContractBaseTest extends TestCase {
         fs.exists(path("/test/new/newdir/dir/subdir/file2")));
   }
 
+  public void testInputStreamClosedTwice() throws IOException {
+    //HADOOP-4760 according to Closeable#close() closing already-closed 
+    //streams should have no effect. 
+    Path src = path("/test/hadoop/file");
+    createFile(src);
+    FSDataInputStream in = fs.open(src);
+    in.close();
+    in.close();
+  }
+  
+  public void testOutputStreamClosedTwice() throws IOException {
+    //HADOOP-4760 according to Closeable#close() closing already-closed 
+    //streams should have no effect. 
+    Path src = path("/test/hadoop/file");
+    FSDataOutputStream out = fs.create(src);
+    out.writeChar('H'); //write some data
+    out.close();
+    out.close();
+  }
+  
   protected Path path(String pathString) {
     return new Path(pathString).makeQualified(fs);
   }