
Merge common from trunk into HDFS-1073

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-1073@1143559 13f79535-47bb-0310-9956-ffa450edef68
Todd Lipcon 14 years ago
parent
commit
f429118004
69 changed files with 2666 additions and 229 deletions
  1. common/CHANGES.txt (+59, -0)
  2. common/bin/hadoop-config.sh (+19, -11)
  3. common/build.xml (+42, -11)
  4. common/src/java/core-default.xml (+6, -5)
  5. common/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java (+1, -2)
  6. common/src/java/org/apache/hadoop/fs/ChecksumFs.java (+1, -2)
  7. common/src/java/org/apache/hadoop/fs/CommonConfigurationKeys.java (+8, -0)
  8. common/src/java/org/apache/hadoop/fs/FileSystem.java (+3, -0)
  9. common/src/java/org/apache/hadoop/fs/FileUtil.java (+3, -3)
  10. common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java (+3, -4)
  11. common/src/java/org/apache/hadoop/fs/Trash.java (+2, -4)
  12. common/src/java/org/apache/hadoop/fs/s3/INode.java (+13, -7)
  13. common/src/java/org/apache/hadoop/fs/shell/Command.java (+5, -2)
  14. common/src/java/org/apache/hadoop/http/HttpServer.java (+3, -0)
  15. common/src/java/org/apache/hadoop/io/BloomMapFile.java (+14, -5)
  16. common/src/java/org/apache/hadoop/io/BytesWritable.java (+12, -1)
  17. common/src/java/org/apache/hadoop/io/DataOutputOutputStream.java (+71, -0)
  18. common/src/java/org/apache/hadoop/io/IOUtils.java (+73, -24)
  19. common/src/java/org/apache/hadoop/io/ObjectWritable.java (+71, -0)
  20. common/src/java/org/apache/hadoop/io/WritableUtils.java (+7, -2)
  21. common/src/java/org/apache/hadoop/io/compress/SnappyCodec.java (+220, -0)
  22. common/src/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java (+9, -2)
  23. common/src/java/org/apache/hadoop/io/compress/snappy/LoadSnappy.java (+70, -0)
  24. common/src/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java (+298, -0)
  25. common/src/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java (+280, -0)
  26. common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java (+2, -2)
  27. common/src/java/org/apache/hadoop/io/serializer/SerializationFactory.java (+1, -2)
  28. common/src/java/org/apache/hadoop/ipc/Client.java (+9, -7)
  29. common/src/java/org/apache/hadoop/ipc/Server.java (+1, -1)
  30. common/src/java/org/apache/hadoop/jmx/JMXJsonServlet.java (+100, -37)
  31. common/src/java/org/apache/hadoop/metrics/util/MetricsIntValue.java (+1, -2)
  32. common/src/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java (+1, -2)
  33. common/src/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java (+1, -2)
  34. common/src/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java (+1, -2)
  35. common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java (+1, -1)
  36. common/src/java/org/apache/hadoop/net/SocketIOWithTimeout.java (+2, -4)
  37. common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java (+39, -0)
  38. common/src/java/org/apache/hadoop/security/SecurityInfo.java (+43, -0)
  39. common/src/java/org/apache/hadoop/security/SecurityUtil.java (+60, -1)
  40. common/src/java/org/apache/hadoop/security/UserGroupInformation.java (+9, -5)
  41. common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java (+1, -3)
  42. common/src/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java (+1, -2)
  43. common/src/java/org/apache/hadoop/util/ProtoUtil.java (+66, -0)
  44. common/src/native/Makefile.am (+3, -0)
  45. common/src/native/configure.ac (+3, -0)
  46. common/src/native/packageNativeHadoop.sh (+13, -0)
  47. common/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c (+127, -0)
  48. common/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c (+131, -0)
  49. common/src/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h (+58, -0)
  50. common/src/saveVersion.sh (+1, -1)
  51. common/src/test/bin/smart-apply-patch.sh (+72, -0)
  52. common/src/test/bin/test-patch.sh (+4, -2)
  53. common/src/test/core/org/apache/hadoop/conf/TestConfiguration.java (+13, -2)
  54. common/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java (+29, -4)
  55. common/src/test/core/org/apache/hadoop/http/TestHtmlQuoting.java (+28, -3)
  56. common/src/test/core/org/apache/hadoop/http/TestHttpServer.java (+26, -0)
  57. common/src/test/core/org/apache/hadoop/io/TestBytesWritable.java (+42, -2)
  58. common/src/test/core/org/apache/hadoop/io/TestIOUtils.java (+45, -0)
  59. common/src/test/core/org/apache/hadoop/io/TestObjectWritableProtos.java (+81, -0)
  60. common/src/test/core/org/apache/hadoop/io/compress/TestCodec.java (+15, -3)
  61. common/src/test/core/org/apache/hadoop/ipc/TestAvroRpc.java (+96, -15)
  62. common/src/test/core/org/apache/hadoop/ipc/TestIPC.java (+51, -0)
  63. common/src/test/core/org/apache/hadoop/ipc/TestRPC.java (+19, -0)
  64. common/src/test/core/org/apache/hadoop/ipc/TestSaslRPC.java (+67, -14)
  65. common/src/test/core/org/apache/hadoop/jmx/TestJMXJsonServlet.java (+13, -0)
  66. common/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java (+1, -1)
  67. common/src/test/core/org/apache/hadoop/security/TestUserGroupInformation.java (+23, -23)
  68. common/src/test/core/org/apache/hadoop/security/token/delegation/TestDelegationToken.java (+1, -1)
  69. common/src/test/core/org/apache/hadoop/util/TestProtoUtil.java (+72, -0)

+ 59 - 0
common/CHANGES.txt

@@ -44,6 +44,15 @@ Trunk (unreleased changes)
     HADOOP-7144. Expose JMX metrics via JSON servlet. (Robert Joseph Evans via
     cdouglas)
 
+    HADOOP-7379. Add the ability to serialize and deserialize protocol buffers
+    in ObjectWritable. (todd)
+
+    HADOOP-7206. Support Snappy compression. (Issei Yoshida and
+    Alejandro Abdelnur via eli)
+
+    HADOOP-7329. Add the capability of getting invividual attribute of a mbean
+    using JMXProxyServlet. (tanping)
+
   IMPROVEMENTS

     HADOOP-7042. Updates to test-patch.sh to include failed test names and
@@ -212,6 +221,24 @@ Trunk (unreleased changes)
     HADOOP-7374. Don't add tools.jar to the classpath when running Hadoop.
     (eli)
 
+    HADOOP-7106. Reorganize project SVN layout to "unsplit" the projects.
+    (todd, nigel)
+
+    HADOOP-6605. Add JAVA_HOME detection to hadoop-config. (eli)
+
+    HADOOP-7384. Allow test-patch to be more flexible about patch format. (todd)
+
+    HADOOP-6929. RPC should have a way to pass Security information other than 
+    protocol annotations. (sharad and omalley via mahadev)
+
+    HADOOP-7385. Remove StringUtils.stringifyException(ie) in logger functions.
+    (Bharath Mundlapudi via Tanping Wang).
+
+    HADOOP-310. Additional constructor requested in BytesWritable. (Brock
+    Noland via atm)
+
+    HADOOP-7429. Add another IOUtils#copyBytes method. (eli)
+
   OPTIMIZATIONS

     HADOOP-7333. Performance improvement in PureJavaCrc32. (Eric Caspole
@@ -219,6 +246,9 @@ Trunk (unreleased changes)
 
 
   BUG FIXES
 
+    HADOOP-7327. FileSystem.listStatus() throws NullPointerException instead of
+    IOException upon access permission failure. (mattf)
+
     HADOOP-7015. RawLocalFileSystem#listStatus does not deal with a directory
     whose entries are changing (e.g. in a multi-thread or multi-process
     environment). (Sanjay Radia via eli)
@@ -299,6 +329,35 @@ Trunk (unreleased changes)
     HADOOP-7356. RPM packages broke bin/hadoop script in developer environment.
     (Eric Yang via todd)
 
+    HADOOP-7389. Use of TestingGroups by tests causes subsequent tests to fail.
+    (atm via tomwhite)
+
+    HADOOP-7390. VersionInfo not generated properly in git after unsplit. (todd
+    via atm)
+
+    HADOOP-7377. Fix command name handling affecting DFSAdmin. (Daryn Sharp
+    via mattf)
+
+    HADOOP-7402. TestConfiguration doesn't clean up after itself. (atm via eli)
+
+    HADOOP-7428. IPC connection is orphaned with null 'out' member.
+    (todd via eli)
+
+    HADOOP-7437. IOUtils.copybytes will suppress the stream closure exceptions.
+    (Uma Maheswara Rao G via szetszwo)
+
+    HADOOP-7090. Fix resource leaks in s3.INode, BloomMapFile, WritableUtils
+    and CBZip2OutputStream.  (Uma Maheswara Rao G via szetszwo)
+
+    HADOOP-7440. HttpServer.getParameterValues throws NPE for missing
+    parameters. (Uma Maheswara Rao G and todd via todd)
+
+    HADOOP-7442. Docs in core-default.xml still reference deprecated config
+    "topology.script.file.name" (atm)
+
+    HADOOP-7419. new hadoop-config.sh doesn't manage classpath for
+    HADOOP_CONF_DIR correctly. (Bing Zheng and todd via todd)
+
 Release 0.22.0 - Unreleased

   INCOMPATIBLE CHANGES

+ 19 - 11
common/bin/hadoop-config.sh

@@ -107,18 +107,26 @@ fi
 # we use in Hadoop. Tune the variable down to prevent vmem explosion.
 export MALLOC_ARENA_MAX=${MALLOC_ARENA_MAX:-4}
 
-# some Java parameters
-if [ "$JAVA_HOME" != "" ]; then
-  #echo "run java in $JAVA_HOME"
-  JAVA_HOME=$JAVA_HOME
-fi
-  
-if [ "$JAVA_HOME" = "" ]; then
-  echo "Error: JAVA_HOME is not set."
-  exit 1
+# Attempt to set JAVA_HOME if it is not set
+if [[ -z $JAVA_HOME ]]; then
+  # On OSX use java_home (or /Library for older versions)
+  if [ "Darwin" == "$(uname -s)" ]; then
+    if [ -x /usr/libexec/java_home ]; then
+      export JAVA_HOME=($(/usr/libexec/java_home))
+    else
+      export JAVA_HOME=(/Library/Java/Home)
+    fi
+  fi
+
+  # Bail if we did not detect it
+  if [[ -z $JAVA_HOME ]]; then
+    echo "Error: JAVA_HOME is not set and could not be found." 1>&2
+    exit 1
+  fi
 fi

 JAVA=$JAVA_HOME/bin/java
+# some Java parameters
 JAVA_HEAP_MAX=-Xmx1000m 

 # check envvars which might override default args
@@ -277,7 +285,7 @@ if [ -d "${HADOOP_HDFS_HOME}" ]; then
     CLASSPATH=${CLASSPATH}:$HADOOP_HDFS_HOME
   fi
   
-  if [ -d "${HADOOP_HDFS_HOME}/conf" ]; then
+  if [ ! -d "${HADOOP_CONF_DIR}" ] && [ -d "${HADOOP_HDFS_HOME}/conf" ]; then
     CLASSPATH=${CLASSPATH}:${HADOOP_HDFS_HOME}/conf
   fi
   
@@ -315,7 +323,7 @@ if [ -d "${HADOOP_MAPRED_HOME}" ]; then
     CLASSPATH=${CLASSPATH}:$HADOOP_MAPRED_HOME
   fi
 
-  if [ -d "${HADOOP_MAPRED_HOME}/conf" ]; then
+  if [ ! -d "${HADOOP_CONF_DIR}" ] && [ -d "${HADOOP_MAPRED_HOME}/conf" ]; then
     CLASSPATH=${CLASSPATH}:${HADOOP_MAPRED_HOME}/conf
   fi
   

+ 42 - 11
common/build.xml

@@ -187,6 +187,9 @@
   <property name="build.dir.eclipse-test-classes" value="${build.dir.eclipse}/classes-test"/>
   <property name="build.dir.eclipse-test-generated-classes" value="${build.dir.eclipse}/classes-test-generated"/>
 
+  <!-- Use environment -->
+  <property environment="env" />
+
   <!-- check if clover reports should be generated -->
   <condition property="clover.enabled">
     <and>
@@ -210,6 +213,14 @@
   <property name="package.buildroot" value="/tmp/hadoop_package_build_${user.name}"/>
   <property name="package.build.dir" value="/tmp/hadoop_package_build_${user.name}/BUILD"/>
 
+  <!-- Indicate is Snappy native library should be bundled with Hadoop or not -->
+  <property name="bundle.snappy" value="false"/>
+
+  <!-- Snappy native library location -->
+  <property name="snappy.prefix" value="/usr/local"/>
+  <property name="snappy.lib" value="${snappy.prefix}/lib"/>
+  <property name="snappy.include" value="${snappy.prefix}/include"/>
+
   <!-- the normal classpath -->
   <path id="classpath">
     <pathelement location="${build.classes}"/>
@@ -228,7 +239,7 @@
     <pathelement path="${clover.jar}"/>
     <path refid="ivy-common.classpath"/>
     <path refid="ivy-test.classpath"/>
-    <pathelement location="${build.classes}"/>
+    <pathelement location="${hadoop-common.jar}"/>
     <pathelement location="${test.conf.dir}"/>
   </path>
 <!--
@@ -401,12 +412,13 @@
   <target name="create-native-makefile" depends="check-native-makefile" if="need.native.makefile"> 
     <antcall target="create-native-configure"/>
     <mkdir dir="${build.native}"/>
-	<exec dir="${build.native}" executable="sh" failonerror="true">
-	  <env key="OS_NAME" value="${os.name}"/>
-	  <env key="OS_ARCH" value="${os.arch}"/>
-	  <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/>
-	  <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/>
-	  <arg line="${native.src.dir}/configure"/>
+
+    <exec dir="${build.native}" executable="sh" failonerror="true">
+      <env key="OS_NAME" value="${os.name}"/>
+      <env key="OS_ARCH" value="${os.arch}"/>
+      <env key="JVM_DATA_MODEL" value="${sun.arch.data.model}"/>
+      <env key="HADOOP_NATIVE_SRCDIR" value="${native.src.dir}"/>
+      <arg line="${native.src.dir}/configure CPPFLAGS=-I${snappy.include} LDFLAGS=-L${snappy.lib}"/>
     </exec>
   </target>
 
@@ -416,6 +428,7 @@
   	
   	
     <mkdir dir="${build.native}/lib"/>
     <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/zlib"/>
+    <mkdir dir="${build.native}/src/org/apache/hadoop/io/compress/snappy"/>
     <mkdir dir="${build.native}/src/org/apache/hadoop/io/nativeio"/>
     <mkdir dir="${build.native}/src/org/apache/hadoop/security"/>
 
@@ -429,7 +442,17 @@
       <class name="org.apache.hadoop.io.compress.zlib.ZlibDecompressor" />
   	</javah>
 
-  	<javah
+    <javah
+      classpath="${build.classes}"
+      destdir="${build.native}/src/org/apache/hadoop/io/compress/snappy"
+      force="yes"
+      verbose="yes"
+      >
+      <class name="org.apache.hadoop.io.compress.snappy.SnappyCompressor"/>
+      <class name="org.apache.hadoop.io.compress.snappy.SnappyDecompressor"/>
+    </javah>
+
+    <javah
   	  classpath="${build.classes}"
   	  destdir="${build.native}/src/org/apache/hadoop/security"
       force="yes"
@@ -489,6 +512,10 @@
     <property name="jar.properties.list" value="commons-logging.properties, log4j.properties, hadoop-metrics.properties" />
     <jar jarfile="${build.dir}/${final.name}.jar"
          basedir="${build.classes}">
+      <service type="org.apache.hadoop.security.SecurityInfo">
+        <provider 
+           classname="org.apache.hadoop.security.AnnotatedSecurityInfo"/>
+      </service>
       <manifest>
         <section name="org/apache/hadoop">
           <attribute name="Implementation-Title" value="${ant.project.name}"/>
@@ -562,7 +589,7 @@
   <target name="-classes-compilation"
     depends="compile-core-classes, compile-core-test"/> 
 
-  <target name="compile-core-test" depends="compile-core-classes, ivy-retrieve-test, generate-test-records, generate-avro-records, generate-avro-protocols">
+  <target name="compile-core-test" depends="jar, ivy-retrieve-test, generate-test-records, generate-avro-records, generate-avro-protocols">
     <mkdir dir="${test.core.build.classes}"/>
     <javac 
      encoding="${build.encoding}" 
@@ -752,9 +779,10 @@
          <sysproperty key="java.security.krb5.conf" value="@{test.krb5.conf.filename}"/>
         <sysproperty key="hadoop.policy.file" value="hadoop-policy.xml" />
         <sysproperty key="java.library.path"
-          value="${build.native}/lib:${lib.dir}/native/${build.platform}"/>
+          value="${build.native}/lib:${lib.dir}/native/${build.platform}:${snappy.lib}"/>
         <sysproperty key="java.security.egd" value="file:///dev/urandom" />
         <sysproperty key="install.c++.examples" value="${install.c++.examples}"/>
+
         <!-- set io.compression.codec.lzo.class in the child jvm only if it is set -->
         <syspropertyset dynamic="no">
           <propertyref name="io.compression.codec.lzo.class"/>
@@ -875,7 +903,6 @@
 
 
  <property name="findbugs.home" value=""/>
   <target name="findbugs" depends="check-for-findbugs, jar" if="findbugs.present" description="Run findbugs if present">
-    <property environment="env"/>
     <property name="findbugs.out.dir" value="${test.build.dir}/findbugs"/>
     <property name="findbugs.exclude.file" value="${test.src.dir}/findbugsExcludeFile.xml"/>
     <property name="findbugs.report.htmlfile" value="${findbugs.out.dir}/hadoop-findbugs-report.html"/>
@@ -1108,6 +1135,8 @@
 	  <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/>
 	  <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/>
 	  <env key="DIST_LIB_DIR" value="${dist.dir}/lib/native"/>
+          <env key="BUNDLE_SNAPPY_LIB" value="${bundle.snappy}"/>
+          <env key="SNAPPY_LIB_DIR" value="${snappy.prefix}/lib"/>
 	  <arg line="${native.src.dir}/packageNativeHadoop.sh"/>
     </exec>
 
@@ -1209,6 +1238,8 @@
 	  <env key="BASE_NATIVE_LIB_DIR" value="${lib.dir}/native"/>
 	  <env key="BUILD_NATIVE_DIR" value="${build.dir}/native"/>
 	  <env key="DIST_LIB_DIR" value="${dist.dir}/lib"/>
+          <env key="BUNDLE_SNAPPY_LIB" value="${bundle.snappy}"/>
+          <env key="SNAPPY_LIB_DIR" value="${snappy.prefix}/lib"/>
 	  <arg line="${native.src.dir}/packageNativeHadoop.sh"/>
     </exec>
 

+ 6 - 5
common/src/java/core-default.xml

@@ -1,4 +1,6 @@
 <?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
 <!--
    Licensed to the Apache Software Foundation (ASF) under one or more
    contributor license agreements.  See the NOTICE file distributed with
@@ -15,7 +17,6 @@
    See the License for the specific language governing permissions and
    limitations under the License.
 -->
-<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 
 
 <!-- Do not modify this file directly.  Instead, copy entries that you -->
 <!-- wish to modify from this file into core-site.xml and change them -->
@@ -174,7 +175,7 @@
 
 
 <property>
   <name>io.compression.codecs</name>
-  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec</value>
+  <value>org.apache.hadoop.io.compress.DefaultCodec,org.apache.hadoop.io.compress.GzipCodec,org.apache.hadoop.io.compress.BZip2Codec,org.apache.hadoop.io.compress.DeflateCodec,org.apache.hadoop.io.compress.SnappyCodec</value>
   <description>A list of the compression codec classes that can be used 
                for compression/decompression.</description>
 </property>
@@ -550,8 +551,8 @@
 	<name>net.topology.node.switch.mapping.impl</name>
   <value>org.apache.hadoop.net.ScriptBasedMapping</value>
   <description> The default implementation of the DNSToSwitchMapping. It
-    invokes a script specified in topology.script.file.name to resolve
-    node names. If the value for topology.script.file.name is not set, the
+    invokes a script specified in net.topology.script.file.name to resolve
+    node names. If the value for net.topology.script.file.name is not set, the
     default value of DEFAULT_RACK is returned for all node names.
   </description>
 </property>
@@ -569,7 +570,7 @@
   <name>net.topology.script.number.args</name>
   <value>100</value>
   <description> The max number of args that the script configured with 
-    topology.script.file.name should be run with. Each arg is an
+    net.topology.script.file.name should be run with. Each arg is an
     IP address.
   </description>
 </property>

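For context only (this is not part of the commit): the io.compression.codecs list above is what CompressionCodecFactory reads to build its codec table, so adding SnappyCodec here makes it discoverable alongside the existing codecs. A minimal sketch, assuming a Hadoop classpath from this era; treat it as illustrative rather than as code from the patch.

    import java.util.List;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.compress.CompressionCodec;
    import org.apache.hadoop.io.compress.CompressionCodecFactory;
    import org.apache.hadoop.util.ReflectionUtils;

    public class CodecListSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // io.compression.codecs (see the core-default.xml change above) drives this list.
        List<Class<? extends CompressionCodec>> classes =
            CompressionCodecFactory.getCodecClasses(conf);
        for (Class<? extends CompressionCodec> c : classes) {
          CompressionCodec codec = ReflectionUtils.newInstance(c, conf);
          System.out.println(c.getName() + " -> " + codec.getDefaultExtension());
        }
      }
    }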
+ 1 - 2
common/src/java/org/apache/hadoop/fs/ChecksumFileSystem.java

@@ -151,8 +151,7 @@ public abstract class ChecksumFileSystem extends FilterFileSystem {
         set(fs.verifyChecksum, null, 1, 0);
       } catch (IOException e) {                   // loudly ignore
         LOG.warn("Problem opening checksum file: "+ file + 
-                 ".  Ignoring exception: " + 
-                 StringUtils.stringifyException(e));
+                 ".  Ignoring exception: " , e); 
         set(fs.verifyChecksum, null, 1, 0);
       }
     }

+ 1 - 2
common/src/java/org/apache/hadoop/fs/ChecksumFs.java

@@ -142,8 +142,7 @@ public abstract class ChecksumFs extends FilterFs {
         set(fs.verifyChecksum, null, 1, 0);
       } catch (IOException e) {                   // loudly ignore
         LOG.warn("Problem opening checksum file: "+ file + 
-                 ".  Ignoring exception: " + 
-                 StringUtils.stringifyException(e));
+                 ".  Ignoring exception: " , e); 
         set(fs.verifyChecksum, null, 1, 0);
       }
     }

+ 8 - 0
common/src/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -85,5 +85,13 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
    */
   public static final String  NET_TOPOLOGY_CONFIGURED_NODE_MAPPING_KEY =
     "net.topology.configured.node.mapping";
+
+  /** Internal buffer size for Snappy compressor/decompressors */
+  public static final String IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY =
+      "io.compression.codec.snappy.buffersize";
+
+  /** Default value for IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY */
+  public static final int IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT =
+      256 * 1024;
 }
 

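For context only (not in the patch): downstream code would read the new Snappy buffer-size key roughly as below, falling back to the 256 KB default constant added above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeys;

    public class SnappyBufferSizeSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Uses the default (256 * 1024) when io.compression.codec.snappy.buffersize is unset.
        int bufferSize = conf.getInt(
            CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
            CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
        System.out.println("Snappy buffer size: " + bufferSize + " bytes");
      }
    }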
+ 3 - 0
common/src/java/org/apache/hadoop/fs/FileSystem.java

@@ -1151,6 +1151,9 @@ public abstract class FileSystem extends Configured implements Closeable {
   private void listStatus(ArrayList<FileStatus> results, Path f,
       PathFilter filter) throws FileNotFoundException, IOException {
     FileStatus listing[] = listStatus(f);
+    if (listing == null) {
+      throw new IOException("Error accessing " + f);
+    }
 
 
     for (int i = 0; i < listing.length; i++) {
       if (filter.accept(listing[i].getPath())) {

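A minimal caller-side sketch (not from this commit) of the behaviour HADOOP-7327 targets: a directory whose listing cannot be produced now surfaces as an IOException from the listing call rather than a NullPointerException deeper in the recursion. The path is a placeholder.

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ListStatusSketch {
      public static void main(String[] args) throws IOException {
        FileSystem fs = FileSystem.getLocal(new Configuration());
        try {
          // Placeholder path; assume it exists but is not readable by this user.
          FileStatus[] entries = fs.listStatus(new Path("/tmp/some-unreadable-dir"));
          System.out.println("entries: " + (entries == null ? 0 : entries.length));
        } catch (IOException e) {
          // With the change above, access failures arrive here as IOException.
          System.err.println("listing failed: " + e.getMessage());
        }
      }
    }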
+ 3 - 3
common/src/java/org/apache/hadoop/fs/FileUtil.java

@@ -652,9 +652,9 @@ public class FileUtil {
     try {
       shExec.execute();
     }catch(Exception e) {
-      if(LOG.isDebugEnabled()) {
-        LOG.debug("Error while changing permission : " + filename 
-            +" Exception: " + StringUtils.stringifyException(e));
+      if (LOG.isDebugEnabled()) {
+        LOG.debug("Error while changing permission : " + filename
+            + " Exception: ", e);
       }
     }
     return shExec.getExitCode();

+ 3 - 4
common/src/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -232,15 +232,14 @@ public class LocalDirAllocator {
                 dirs.add(localDirs[i]);
                 dfList.add(new DF(new File(localDirs[i]), 30000));
               } catch (DiskErrorException de) {
-                LOG.warn( localDirs[i] + "is not writable\n" +
-                    StringUtils.stringifyException(de));
+                LOG.warn( localDirs[i] + "is not writable\n", de);
               }
             } else {
               LOG.warn( "Failed to create " + localDirs[i]);
             }
           } catch (IOException ie) { 
             LOG.warn( "Failed to create " + localDirs[i] + ": " +
-                ie.getMessage() + "\n" + StringUtils.stringifyException(ie));
+                ie.getMessage() + "\n", ie);
           } //ignore
         }
         localDirs = dirs.toArray(new String[dirs.size()]);
@@ -261,7 +260,7 @@ public class LocalDirAllocator {
         DiskChecker.checkDir(new File(file.getParent().toUri().getPath()));
         return file;
       } catch (DiskErrorException d) {
-        LOG.warn(StringUtils.stringifyException(d));
+        LOG.warn("Disk Error Exception: ", d);
         return null;
       }
     }

+ 2 - 4
common/src/java/org/apache/hadoop/fs/Trash.java

@@ -327,15 +327,13 @@ public class Trash extends Configured {
             }
           }
         } catch (Exception e) {
-          LOG.warn("RuntimeException during Trash.Emptier.run() " + 
-                   StringUtils.stringifyException(e));
+          LOG.warn("RuntimeException during Trash.Emptier.run(): ", e); 
         }
       }
       try {
         fs.close();
       } catch(IOException e) {
-        LOG.warn("Trash cannot close FileSystem. " +
-            StringUtils.stringifyException(e));
+        LOG.warn("Trash cannot close FileSystem: ", e);
       }
     }
 
 

+ 13 - 7
common/src/java/org/apache/hadoop/fs/s3/INode.java

@@ -27,6 +27,7 @@ import java.io.InputStream;
 
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.io.IOUtils;
 
 
 /**
  * Holds file metadata including type (regular file, or directory),
@@ -82,15 +83,20 @@ public class INode {
   public InputStream serialize() throws IOException {
     ByteArrayOutputStream bytes = new ByteArrayOutputStream();
     DataOutputStream out = new DataOutputStream(bytes);
-    out.writeByte(fileType.ordinal());
-    if (isFile()) {
-      out.writeInt(blocks.length);
-      for (int i = 0; i < blocks.length; i++) {
-        out.writeLong(blocks[i].getId());
-        out.writeLong(blocks[i].getLength());
+    try {
+      out.writeByte(fileType.ordinal());
+      if (isFile()) {
+        out.writeInt(blocks.length);
+        for (int i = 0; i < blocks.length; i++) {
+          out.writeLong(blocks[i].getId());
+          out.writeLong(blocks[i].getLength());
+        }
       }
+      out.close();
+      out = null;
+    } finally {
+      IOUtils.closeStream(out);
     }
-    out.close();
     return new ByteArrayInputStream(bytes.toByteArray());
   }
   
   

+ 5 - 2
common/src/java/org/apache/hadoop/fs/shell/Command.java

@@ -20,6 +20,7 @@ package org.apache.hadoop.fs.shell;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.io.PrintStream;
+import java.lang.reflect.Field;
 import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.LinkedList;
@@ -378,7 +379,7 @@ abstract public class Command extends Configured {
   public String getName() {
     return (name == null)
       ? getCommandField("NAME")
-      : name.startsWith("-") ? name.substring(1) : name; // this is a historical method
+      : name.startsWith("-") ? name.substring(1) : name;
   }

   /**
@@ -433,7 +434,9 @@ abstract public class Command extends Configured {
   private String getCommandField(String field) {
     String value;
     try {
-      value = this.getClass().getField(field).get(this).toString();
+      Field f = this.getClass().getDeclaredField(field);
+      f.setAccessible(true);
+      value = f.get(this).toString();
     } catch (Exception e) {
       throw new RuntimeException(
           "failed to get " + this.getClass().getSimpleName()+"."+field, e);

+ 3 - 0
common/src/java/org/apache/hadoop/http/HttpServer.java

@@ -800,6 +800,9 @@ public class HttpServer implements FilterContainer {
       public String[] getParameterValues(String name) {
         String unquoteName = HtmlQuoting.unquoteHtmlChars(name);
         String[] unquoteValue = rawRequest.getParameterValues(unquoteName);
+        if (unquoteValue == null) {
+          return null;
+        }
         String[] result = new String[unquoteValue.length];
         for(int i=0; i < result.length; ++i) {
           result[i] = HtmlQuoting.quoteHtmlChars(unquoteValue[i]);

+ 14 - 5
common/src/java/org/apache/hadoop/io/BloomMapFile.java

@@ -31,7 +31,6 @@ import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
 import org.apache.hadoop.io.compress.CompressionCodec;
-import org.apache.hadoop.util.Options;
 import org.apache.hadoop.util.Progressable;
 import org.apache.hadoop.util.bloom.DynamicBloomFilter;
 import org.apache.hadoop.util.bloom.Filter;
@@ -187,9 +186,14 @@ public class BloomMapFile {
     public synchronized void close() throws IOException {
       super.close();
       DataOutputStream out = fs.create(new Path(dir, BLOOM_FILE_NAME), true);
-      bloomFilter.write(out);
-      out.flush();
-      out.close();
+      try {
+        bloomFilter.write(out);
+        out.flush();
+        out.close();
+        out = null;
+      } finally {
+        IOUtils.closeStream(out);
+      }
     }

   }
@@ -225,15 +229,20 @@ public class BloomMapFile {
     
     
     private void initBloomFilter(Path dirName, 
                                  Configuration conf) {
+      
+      DataInputStream in = null;
       try {
         FileSystem fs = dirName.getFileSystem(conf);
-        DataInputStream in = fs.open(new Path(dirName, BLOOM_FILE_NAME));
+        in = fs.open(new Path(dirName, BLOOM_FILE_NAME));
         bloomFilter = new DynamicBloomFilter();
         bloomFilter.readFields(in);
         in.close();
+        in = null;
       } catch (IOException ioe) {
         LOG.warn("Can't open BloomFilter: " + ioe + " - fallback to MapFile.");
         bloomFilter = null;
+      } finally {
+        IOUtils.closeStream(in);
       }
     }
     
     

+ 12 - 1
common/src/java/org/apache/hadoop/io/BytesWritable.java

@@ -51,8 +51,19 @@ public class BytesWritable extends BinaryComparable
    * @param bytes This array becomes the backing storage for the object.
    */
   public BytesWritable(byte[] bytes) {
+    this(bytes, bytes.length);
+  }
+
+  /**
+   * Create a BytesWritable using the byte array as the initial value
+   * and length as the length. Use this constructor if the array is larger
+   * than the value it represents.
+   * @param bytes This array becomes the backing storage for the object.
+   * @param length The number of bytes to use from array.
+   */
+  public BytesWritable(byte[] bytes, int length) {
     this.bytes = bytes;
-    this.size = bytes.length;
+    this.size = length;
   }
   
   /**

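A quick illustration (not part of the commit) of the new two-argument constructor from HADOOP-310: the backing array may be larger than the logical value it carries.

    import org.apache.hadoop.io.BytesWritable;

    public class BytesWritableLengthSketch {
      public static void main(String[] args) {
        byte[] backing = new byte[64];   // bigger than the value it carries
        backing[0] = 'h';
        backing[1] = 'i';
        // Only the first two bytes count as the value.
        BytesWritable value = new BytesWritable(backing, 2);
        System.out.println("length = " + value.getLength());  // 2, not 64
      }
    }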
+ 71 - 0
common/src/java/org/apache/hadoop/io/DataOutputOutputStream.java

@@ -0,0 +1,71 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import java.io.DataOutput;
+import java.io.IOException;
+import java.io.OutputStream;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+
+/**
+ * OutputStream implementation that wraps a DataOutput.
+ */
+@InterfaceAudience.Public
+@InterfaceStability.Unstable
+public class DataOutputOutputStream extends OutputStream {
+
+  private final DataOutput out;
+
+  /**
+   * Construct an OutputStream from the given DataOutput. If 'out'
+   * is already an OutputStream, simply returns it. Otherwise, wraps
+   * it in an OutputStream.
+   * @param out the DataOutput to wrap
+   * @return an OutputStream instance that outputs to 'out'
+   */
+  public static OutputStream constructOutputStream(DataOutput out) {
+    if (out instanceof OutputStream) {
+      return (OutputStream)out;
+    } else {
+      return new DataOutputOutputStream(out);
+    }
+  }
+  
+  private DataOutputOutputStream(DataOutput out) {
+    this.out = out;
+  }
+  
+  @Override
+  public void write(int b) throws IOException {
+    out.writeByte(b);
+  }
+
+  @Override
+  public void write(byte[] b, int off, int len) throws IOException {
+    out.write(b, off, len);
+  }
+
+  @Override
+  public void write(byte[] b) throws IOException {
+    out.write(b);
+  }
+  
+
+}

+ 73 - 24
common/src/java/org/apache/hadoop/io/IOUtils.java

@@ -36,6 +36,7 @@ public class IOUtils {
 
 
   /**
    * Copies from one stream to another.
+   *
    * @param in InputStrem to read from
    * @param out OutputStream to write to
    * @param buffSize the size of the buffer 
@@ -44,7 +45,6 @@ public class IOUtils {
    */
   public static void copyBytes(InputStream in, OutputStream out, int buffSize, boolean close) 
     throws IOException {
-
     try {
       copyBytes(in, out, buffSize);
       if(close) {
@@ -70,7 +70,6 @@ public class IOUtils {
    */
   public static void copyBytes(InputStream in, OutputStream out, int buffSize) 
     throws IOException {
-
     PrintStream ps = out instanceof PrintStream ? (PrintStream)out : null;
     byte buf[] = new byte[buffSize];
     int bytesRead = in.read(buf);
@@ -82,9 +81,11 @@ public class IOUtils {
       bytesRead = in.read(buf);
     }
   }
+
   /**
    * Copies from one stream to another. <strong>closes the input and output streams 
    * at the end</strong>.
+   *
    * @param in InputStrem to read from
    * @param out OutputStream to write to
    * @param conf the Configuration object 
@@ -96,7 +97,8 @@ public class IOUtils {
   
   
   /**
    * Copies from one stream to another.
-   * @param in InputStrem to read from
+   *
+   * @param in InputStream to read from
    * @param out OutputStream to write to
    * @param conf the Configuration object
    * @param close whether or not close the InputStream and 
@@ -106,21 +108,64 @@ public class IOUtils {
     throws IOException {
     copyBytes(in, out, conf.getInt("io.file.buffer.size", 4096),  close);
   }
+
+  /**
+   * Copies count bytes from one stream to another.
+   *
+   * @param in InputStream to read from
+   * @param out OutputStream to write to
+   * @param count number of bytes to copy
+   * @param close whether to close the streams
+   * @throws IOException if bytes can not be read or written
+   */
+  public static void copyBytes(InputStream in, OutputStream out, long count,
+      boolean close) throws IOException {
+    byte buf[] = new byte[4096];
+    long bytesRemaining = count;
+    int bytesRead;
+
+    try {
+      while (bytesRemaining > 0) {
+        int bytesToRead = (int)
+          (bytesRemaining < buf.length ? bytesRemaining : buf.length);
+
+        bytesRead = in.read(buf, 0, bytesToRead);
+        if (bytesRead == -1)
+          break;
+
+        out.write(buf, 0, bytesRead);
+        bytesRemaining -= bytesRead;
+      }
+      if (close) {
+        out.close();
+        out = null;
+        in.close();
+        in = null;
+      }
+    } finally {
+      if (close) {
+        closeStream(out);
+        closeStream(in);
+      }
+    }
+  }
   
   
-  /** Reads len bytes in a loop.
-   * @param in The InputStream to read from
+  /**
+   * Reads len bytes in a loop.
+   *
+   * @param in InputStream to read from
    * @param buf The buffer to fill
    * @param off offset from the buffer
    * @param len the length of bytes to read
    * @throws IOException if it could not read requested number of bytes 
    * for any reason (including EOF)
    */
-  public static void readFully( InputStream in, byte buf[],
-      int off, int len ) throws IOException {
+  public static void readFully(InputStream in, byte buf[],
+      int off, int len) throws IOException {
     int toRead = len;
     int toRead = len;
-    while ( toRead > 0 ) {
-      int ret = in.read( buf, off, toRead );
-      if ( ret < 0 ) {
+    while (toRead > 0) {
+      int ret = in.read(buf, off, toRead);
+      if (ret < 0) {
         throw new IOException( "Premature EOF from inputStream");
         throw new IOException( "Premature EOF from inputStream");
       }
       }
       toRead -= ret;
       toRead -= ret;
@@ -128,16 +173,17 @@ public class IOUtils {
     }
     }
   }
   }
   
   
-  /** Similar to readFully(). Skips bytes in a loop.
+  /**
+   * Similar to readFully(). Skips bytes in a loop.
    * @param in The InputStream to skip bytes from
    * @param len number of bytes to skip.
    * @throws IOException if it could not skip requested number of bytes 
    * for any reason (including EOF)
    */
-  public static void skipFully( InputStream in, long len ) throws IOException {
-    while ( len > 0 ) {
-      long ret = in.skip( len );
-      if ( ret < 0 ) {
+  public static void skipFully(InputStream in, long len) throws IOException {
+    while (len > 0) {
+      long ret = in.skip(len);
+      if (ret < 0) {
         throw new IOException( "Premature EOF from inputStream");
         throw new IOException( "Premature EOF from inputStream");
       }
       }
       len -= ret;
       len -= ret;
@@ -147,11 +193,12 @@ public class IOUtils {
   /**
    * Close the Closeable objects and <b>ignore</b> any {@link IOException} or 
    * null pointers. Must only be used for cleanup in exception handlers.
+   *
    * @param log the log to record problems to at debug level. Can be null.
    * @param closeables the objects to close
    */
   public static void cleanup(Log log, java.io.Closeable... closeables) {
-    for(java.io.Closeable c : closeables) {
+    for (java.io.Closeable c : closeables) {
       if (c != null) {
         try {
           c.close();
@@ -167,27 +214,29 @@ public class IOUtils {
   /**
   /**
    * Closes the stream ignoring {@link IOException}.
    * Must only be called in cleaning up from exception handlers.
+   *
    * @param stream the Stream to close
    */
-  public static void closeStream( java.io.Closeable stream ) {
+  public static void closeStream(java.io.Closeable stream) {
     cleanup(null, stream);
   }
   }
   
   
   /**
   /**
-   * Closes the socket ignoring {@link IOException} 
+   * Closes the socket ignoring {@link IOException}
+   *
    * @param sock the Socket to close
    */
-  public static void closeSocket( Socket sock ) {
-    // avoids try { close() } dance
-    if ( sock != null ) {
+  public static void closeSocket(Socket sock) {
+    if (sock != null) {
       try {
-       sock.close();
-      } catch ( IOException ignored ) {
+        sock.close();
+      } catch (IOException ignored) {
       }
       }
     }
     }
   }
   }
   
   
-  /** /dev/null of OutputStreams.
+  /**
+   * The /dev/null of OutputStreams.
    */
    */
   public static class NullOutputStream extends OutputStream {
   public static class NullOutputStream extends OutputStream {
     public void write(byte[] b, int off, int len) throws IOException {
+ 71 - 0
common/src/java/org/apache/hadoop/io/ObjectWritable.java

@@ -19,6 +19,8 @@
 package org.apache.hadoop.io;

 import java.lang.reflect.Array;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
 
 
 import java.io.*;
 import java.util.*;
@@ -26,6 +28,9 @@ import java.util.*;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.*;
+import org.apache.hadoop.util.ProtoUtil;
+
+import com.google.protobuf.Message;
 
 
 /** A polymorphic Writable that writes an instance with it's class name.
  * Handles arrays, strings and primitive types without a Writable wrapper.
@@ -191,6 +196,9 @@ public class ObjectWritable implements Writable, Configurable {
       UTF8.writeString(out, instance.getClass().getName());
       ((Writable)instance).write(out);
 
+    } else if (Message.class.isAssignableFrom(declaredClass)) {
+      ((Message)instance).writeDelimitedTo(
+          DataOutputOutputStream.constructOutputStream(out));
     } else {
       throw new IOException("Can't write: "+instance+" as "+declaredClass);
     }
@@ -261,6 +269,8 @@ public class ObjectWritable implements Writable, Configurable {
       instance = UTF8.readString(in);
     } else if (declaredClass.isEnum()) {         // enum
       instance = Enum.valueOf((Class<? extends Enum>) declaredClass, UTF8.readString(in));
+    } else if (Message.class.isAssignableFrom(declaredClass)) {
+      instance = tryInstantiateProtobuf(declaredClass, in);
     } else {                                      // Writable
       Class instanceClass = null;
       String str = UTF8.readString(in);
@@ -285,6 +295,67 @@ public class ObjectWritable implements Writable, Configurable {
       
       
   }
 
 
+  /**
+   * Try to instantiate a protocol buffer of the given message class
+   * from the given input stream.
+   * 
+   * @param protoClass the class of the generated protocol buffer
+   * @param dataIn the input stream to read from
+   * @return the instantiated Message instance
+   * @throws IOException if an IO problem occurs
+   */
+  private static Message tryInstantiateProtobuf(
+      Class<?> protoClass,
+      DataInput dataIn) throws IOException {
+
+    try {
+      if (dataIn instanceof InputStream) {
+        // We can use the built-in parseDelimitedFrom and not have to re-copy
+        // the data
+        Method parseMethod = getStaticProtobufMethod(protoClass,
+            "parseDelimitedFrom", InputStream.class);
+        return (Message)parseMethod.invoke(null, (InputStream)dataIn);
+      } else {
+        // Have to read it into a buffer first, since protobuf doesn't deal
+        // with the DataInput interface directly.
+        
+        // Read the size delimiter that writeDelimitedTo writes
+        int size = ProtoUtil.readRawVarint32(dataIn);
+        if (size < 0) {
+          throw new IOException("Invalid size: " + size);
+        }
+      
+        byte[] data = new byte[size];
+        dataIn.readFully(data);
+        Method parseMethod = getStaticProtobufMethod(protoClass,
+            "parseFrom", byte[].class);
+        return (Message)parseMethod.invoke(null, data);
+      }
+    } catch (InvocationTargetException e) {
+      
+      if (e.getCause() instanceof IOException) {
+        throw (IOException)e.getCause();
+      } else {
+        throw new IOException(e.getCause());
+      }
+    } catch (IllegalAccessException iae) {
+      throw new AssertionError("Could not access parse method in " +
+          protoClass);
+    }
+  }
+
+  static Method getStaticProtobufMethod(Class<?> declaredClass, String method,
+      Class<?> ... args) {
+
+    try {
+      return declaredClass.getMethod(method, args);
+    } catch (Exception e) {
+      // This is a bug in Hadoop - protobufs should all have this static method
+      throw new AssertionError("Protocol buffer class " + declaredClass +
+          " does not have an accessible parseFrom(InputStream) method!");
+    }
+  }
+
   /**
    * Find and load the class with given name <tt>className</tt> by first finding
    * it in the specified <tt>conf</tt>. If the specified <tt>conf</tt> is null,

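A hedged round-trip sketch of what HADOOP-7379 enables (not part of the commit): any generated protobuf Message can now be written and read back through ObjectWritable. EnumDescriptorProto is used only because it ships with protobuf-java; assume a protobuf 2.x jar on the classpath.

    import java.io.ByteArrayInputStream;
    import java.io.ByteArrayOutputStream;
    import java.io.DataInputStream;
    import java.io.DataOutputStream;
    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.ObjectWritable;

    import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;

    public class ProtobufObjectWritableSketch {
      public static void main(String[] args) throws IOException {
        Configuration conf = new Configuration();
        EnumDescriptorProto proto =
            EnumDescriptorProto.newBuilder().setName("State").build();

        ByteArrayOutputStream buffer = new ByteArrayOutputStream();
        // Serialized with a varint length prefix via writeDelimitedTo.
        ObjectWritable.writeObject(new DataOutputStream(buffer), proto,
            EnumDescriptorProto.class, conf);

        Object copy = ObjectWritable.readObject(
            new DataInputStream(new ByteArrayInputStream(buffer.toByteArray())), conf);
        System.out.println(((EnumDescriptorProto) copy).getName());  // State
      }
    }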
+ 7 - 2
common/src/java/org/apache/hadoop/io/WritableUtils.java

@@ -62,8 +62,13 @@ public final class WritableUtils  {
     if (bytes != null) {
       ByteArrayOutputStream bos =  new ByteArrayOutputStream();
       GZIPOutputStream gzout = new GZIPOutputStream(bos);
-      gzout.write(bytes, 0, bytes.length);
-      gzout.close();
+      try {
+        gzout.write(bytes, 0, bytes.length);
+        gzout.close();
+        gzout = null;
+      } finally {
+        IOUtils.closeStream(gzout);
+      }
       byte[] buffer = bos.toByteArray();
       int len = buffer.length;
       out.writeInt(len);

+ 220 - 0
common/src/java/org/apache/hadoop/io/compress/SnappyCodec.java

@@ -0,0 +1,220 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.OutputStream;
+
+import org.apache.hadoop.conf.Configurable;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.snappy.LoadSnappy;
+import org.apache.hadoop.io.compress.snappy.SnappyCompressor;
+import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;
+import org.apache.hadoop.fs.CommonConfigurationKeys;
+
+/**
+ * This class creates snappy compressors/decompressors.
+ */
+public class SnappyCodec implements Configurable, CompressionCodec {
+
+  static {
+    LoadSnappy.isLoaded();
+  }
+
+  Configuration conf;
+
+  /**
+   * Set the configuration to be used by this object.
+   *
+   * @param conf the configuration object.
+   */
+  @Override
+  public void setConf(Configuration conf) {
+    this.conf = conf;
+  }
+
+  /**
+   * Return the configuration used by this object.
+   *
+   * @return the configuration object used by this object.
+   */
+  @Override
+  public Configuration getConf() {
+    return conf;
+  }
+
+  /**
+   * Are the native snappy libraries loaded & initialized?
+   *
+   * @param conf configuration
+   * @return true if loaded & initialized, otherwise false
+   */
+  public static boolean isNativeSnappyLoaded(Configuration conf) {
+    return LoadSnappy.isLoaded() && conf.getBoolean(
+        CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_KEY,
+        CommonConfigurationKeys.IO_NATIVE_LIB_AVAILABLE_DEFAULT);
+  }
+
+  /**
+   * Create a {@link CompressionOutputStream} that will write to the given
+   * {@link OutputStream}.
+   *
+   * @param out the location for the final output stream
+   * @return a stream the user can write uncompressed data to have it compressed
+   * @throws IOException
+   */
+  @Override
+  public CompressionOutputStream createOutputStream(OutputStream out)
+      throws IOException {
+    return createOutputStream(out, createCompressor());
+  }
+
+  /**
+   * Create a {@link CompressionOutputStream} that will write to the given
+   * {@link OutputStream} with the given {@link Compressor}.
+   *
+   * @param out        the location for the final output stream
+   * @param compressor compressor to use
+   * @return a stream the user can write uncompressed data to have it compressed
+   * @throws IOException
+   */
+  @Override
+  public CompressionOutputStream createOutputStream(OutputStream out,
+                                                    Compressor compressor)
+      throws IOException {
+    if (!isNativeSnappyLoaded(conf)) {
+      throw new RuntimeException("native snappy library not available");
+    }
+    int bufferSize = conf.getInt(
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
+
+    int compressionOverhead = (bufferSize / 6) + 32;
+
+    return new BlockCompressorStream(out, compressor, bufferSize,
+        compressionOverhead);
+  }
+
+  /**
+   * Get the type of {@link Compressor} needed by this {@link CompressionCodec}.
+   *
+   * @return the type of compressor needed by this codec.
+   */
+  @Override
+  public Class<? extends Compressor> getCompressorType() {
+    if (!isNativeSnappyLoaded(conf)) {
+      throw new RuntimeException("native snappy library not available");
+    }
+
+    return SnappyCompressor.class;
+  }
+
+  /**
+   * Create a new {@link Compressor} for use by this {@link CompressionCodec}.
+   *
+   * @return a new compressor for use by this codec
+   */
+  @Override
+  public Compressor createCompressor() {
+    if (!isNativeSnappyLoaded(conf)) {
+      throw new RuntimeException("native snappy library not available");
+    }
+    int bufferSize = conf.getInt(
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
+    return new SnappyCompressor(bufferSize);
+  }
+
+  /**
+   * Create a {@link CompressionInputStream} that will read from the given
+   * input stream.
+   *
+   * @param in the stream to read compressed bytes from
+   * @return a stream to read uncompressed bytes from
+   * @throws IOException
+   */
+  @Override
+  public CompressionInputStream createInputStream(InputStream in)
+      throws IOException {
+    return createInputStream(in, createDecompressor());
+  }
+
+  /**
+   * Create a {@link CompressionInputStream} that will read from the given
+   * {@link InputStream} with the given {@link Decompressor}.
+   *
+   * @param in           the stream to read compressed bytes from
+   * @param decompressor decompressor to use
+   * @return a stream to read uncompressed bytes from
+   * @throws IOException
+   */
+  @Override
+  public CompressionInputStream createInputStream(InputStream in,
+                                                  Decompressor decompressor)
+      throws IOException {
+    if (!isNativeSnappyLoaded(conf)) {
+      throw new RuntimeException("native snappy library not available");
+    }
+
+    return new BlockDecompressorStream(in, decompressor, conf.getInt(
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT));
+  }
+
+  /**
+   * Get the type of {@link Decompressor} needed by this {@link CompressionCodec}.
+   *
+   * @return the type of decompressor needed by this codec.
+   */
+  @Override
+  public Class<? extends Decompressor> getDecompressorType() {
+    if (!isNativeSnappyLoaded(conf)) {
+      throw new RuntimeException("native snappy library not available");
+    }
+
+    return SnappyDecompressor.class;
+  }
+
+  /**
+   * Create a new {@link Decompressor} for use by this {@link CompressionCodec}.
+   *
+   * @return a new decompressor for use by this codec
+   */
+  @Override
+  public Decompressor createDecompressor() {
+    if (!isNativeSnappyLoaded(conf)) {
+      throw new RuntimeException("native snappy library not available");
+    }
+    int bufferSize = conf.getInt(
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_KEY,
+        CommonConfigurationKeys.IO_COMPRESSION_CODEC_SNAPPY_BUFFERSIZE_DEFAULT);
+    return new SnappyDecompressor(bufferSize);
+  }
+
+  /**
+   * Get the default filename extension for this kind of compression.
+   *
+   * @return <code>.snappy</code>.
+   */
+  @Override
+  public String getDefaultExtension() {
+    return ".snappy";
+  }
+}
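
A hedged usage sketch for the codec as a whole; it assumes the native snappy and libhadoop libraries are actually loadable on the machine running it, otherwise createCompressor() throws at runtime:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.io.compress.CompressionInputStream;
import org.apache.hadoop.io.compress.CompressionOutputStream;
import org.apache.hadoop.io.compress.SnappyCodec;
import org.apache.hadoop.util.ReflectionUtils;

public class SnappyCodecExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // ReflectionUtils calls setConf() because SnappyCodec is Configurable.
    SnappyCodec codec = ReflectionUtils.newInstance(SnappyCodec.class, conf);

    ByteArrayOutputStream compressed = new ByteArrayOutputStream();
    CompressionOutputStream out = codec.createOutputStream(compressed);
    out.write("hello snappy".getBytes("UTF-8"));
    out.close();

    CompressionInputStream in = codec.createInputStream(
        new ByteArrayInputStream(compressed.toByteArray()));
    byte[] buf = new byte[1024];
    int n = in.read(buf);       // a single read suffices for this tiny payload
    in.close();
    System.out.println(new String(buf, 0, n, "UTF-8"));
  }
}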

+ 9 - 2
common/src/java/org/apache/hadoop/io/compress/bzip2/CBZip2OutputStream.java

@@ -27,6 +27,8 @@ package org.apache.hadoop.io.compress.bzip2;
 import java.io.OutputStream;
 import java.io.IOException;
 
 
+import org.apache.hadoop.io.IOUtils;
+
 /**
  * An output stream that compresses into the BZip2 format (without the file
  * header chars) into another stream.
@@ -727,8 +729,13 @@ public class CBZip2OutputStream extends OutputStream implements BZip2Constants {
   public void close() throws IOException {
     if (out != null) {
       OutputStream outShadow = this.out;
-      finish();
-      outShadow.close();
+      try {
+        finish();
+        outShadow.close();
+        outShadow = null;
+      } finally {
+        IOUtils.closeStream(outShadow);
+      }
     }
   }
   
   

+ 70 - 0
common/src/java/org/apache/hadoop/io/compress/snappy/LoadSnappy.java

@@ -0,0 +1,70 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io.compress.snappy;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.util.NativeCodeLoader;
+
+/**
+ * Determines if Snappy native library is available and loads it if available.
+ */
+public class LoadSnappy {
+  private static final Log LOG = LogFactory.getLog(LoadSnappy.class.getName());
+
+  private static boolean AVAILABLE = false;
+  private static boolean LOADED = false;
+
+  static {
+    try {
+      System.loadLibrary("snappy");
+      LOG.warn("Snappy native library is available");
+      AVAILABLE = true;
+    } catch (UnsatisfiedLinkError ex) {
+      //NOP
+    }
+    boolean hadoopNativeAvailable = NativeCodeLoader.isNativeCodeLoaded();
+    LOADED = AVAILABLE && hadoopNativeAvailable;
+    if (LOADED) {
+      LOG.info("Snappy native library loaded");
+    } else {
+      LOG.warn("Snappy native library not loaded");
+    }
+  }
+
+  /**
+   * Returns if the Snappy native library is available.
+   *
+   * @return <code>true</code> if the Snappy native library is available,
+   * <code>false</code> if not.
+   */
+  public static boolean isAvailable() {
+    return AVAILABLE;
+  }
+
+  /**
+   * Returns if Snappy native library is loaded.
+   *
+   * @return <code>true</code> if Snappy native library is loaded,
+   * <code>false</code> if not.
+   */
+  public static boolean isLoaded() {
+    return LOADED;
+  }
+
+}

+ 298 - 0
common/src/java/org/apache/hadoop/io/compress/snappy/SnappyCompressor.java

@@ -0,0 +1,298 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.snappy;
+
+import java.io.IOException;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.compress.Compressor;
+
+/**
+ * A {@link Compressor} based on the snappy compression algorithm.
+ * http://code.google.com/p/snappy/
+ */
+public class SnappyCompressor implements Compressor {
+  private static final Log LOG =
+      LogFactory.getLog(SnappyCompressor.class.getName());
+  private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
+
+  // HACK - Use this as a global lock in the JNI layer
+  @SuppressWarnings({"unchecked", "unused"})
+  private static Class clazz = SnappyCompressor.class;
+
+  private int directBufferSize;
+  private Buffer compressedDirectBuf = null;
+  private int uncompressedDirectBufLen;
+  private Buffer uncompressedDirectBuf = null;
+  private byte[] userBuf = null;
+  private int userBufOff = 0, userBufLen = 0;
+  private boolean finish, finished;
+
+  private long bytesRead = 0L;
+  private long bytesWritten = 0L;
+
+
+  static {
+    if (LoadSnappy.isLoaded()) {
+      // Initialize the native library
+      try {
+        initIDs();
+      } catch (Throwable t) {
+        // Ignore failure to load/initialize snappy
+        LOG.warn(t.toString());
+      }
+    } else {
+      LOG.error("Cannot load " + SnappyCompressor.class.getName() +
+          " without snappy library!");
+    }
+  }
+
+  /**
+   * Creates a new compressor.
+   *
+   * @param directBufferSize size of the direct buffer to be used.
+   */
+  public SnappyCompressor(int directBufferSize) {
+    this.directBufferSize = directBufferSize;
+
+    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    compressedDirectBuf.position(directBufferSize);
+  }
+
+  /**
+   * Creates a new compressor with the default buffer size.
+   */
+  public SnappyCompressor() {
+    this(DEFAULT_DIRECT_BUFFER_SIZE);
+  }
+
+  /**
+   * Sets input data for compression.
+   * This should be called whenever #needsInput() returns
+   * <code>true</code> indicating that more input data is required.
+   *
+   * @param b   Input data
+   * @param off Start offset
+   * @param len Length
+   */
+  @Override
+  public synchronized void setInput(byte[] b, int off, int len) {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+    finished = false;
+
+    if (len > uncompressedDirectBuf.remaining()) {
+      // save data; now !needsInput
+      this.userBuf = b;
+      this.userBufOff = off;
+      this.userBufLen = len;
+    } else {
+      ((ByteBuffer) uncompressedDirectBuf).put(b, off, len);
+      uncompressedDirectBufLen = uncompressedDirectBuf.position();
+    }
+
+    bytesRead += len;
+  }
+
+  /**
+   * If a write would exceed the capacity of the direct buffers, it is set
+   * aside to be loaded by this function while the compressed data are
+   * consumed.
+   */
+  synchronized void setInputFromSavedData() {
+    if (0 >= userBufLen) {
+      return;
+    }
+    finished = false;
+
+    uncompressedDirectBufLen = Math.min(userBufLen, directBufferSize);
+    ((ByteBuffer) uncompressedDirectBuf).put(userBuf, userBufOff,
+        uncompressedDirectBufLen);
+
+    // Note how much data is being fed to snappy
+    userBufOff += uncompressedDirectBufLen;
+    userBufLen -= uncompressedDirectBufLen;
+  }
+
+  /**
+   * Does nothing.
+   */
+  @Override
+  public synchronized void setDictionary(byte[] b, int off, int len) {
+    // do nothing
+  }
+
+  /**
+   * Returns true if the input data buffer is empty and
+   * #setInput() should be called to provide more input.
+   *
+   * @return <code>true</code> if the input data buffer is empty and
+   *         #setInput() should be called in order to provide more input.
+   */
+  @Override
+  public synchronized boolean needsInput() {
+    return !(compressedDirectBuf.remaining() > 0
+        || uncompressedDirectBuf.remaining() == 0 || userBufLen > 0);
+  }
+
+  /**
+   * When called, indicates that compression should end
+   * with the current contents of the input buffer.
+   */
+  @Override
+  public synchronized void finish() {
+    finish = true;
+  }
+
+  /**
+   * Returns true if the end of the compressed
+   * data output stream has been reached.
+   *
+   * @return <code>true</code> if the end of the compressed
+   *         data output stream has been reached.
+   */
+  @Override
+  public synchronized boolean finished() {
+    // Check if all uncompressed data has been consumed
+    return (finish && finished && compressedDirectBuf.remaining() == 0);
+  }
+
+  /**
+   * Fills specified buffer with compressed data. Returns actual number
+   * of bytes of compressed data. A return value of 0 indicates that
+   * needsInput() should be called in order to determine if more input
+   * data is required.
+   *
+   * @param b   Buffer for the compressed data
+   * @param off Start offset of the data
+   * @param len Size of the buffer
+   * @return The actual number of bytes of compressed data.
+   */
+  @Override
+  public synchronized int compress(byte[] b, int off, int len)
+      throws IOException {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    // Check if there is compressed data
+    int n = compressedDirectBuf.remaining();
+    if (n > 0) {
+      n = Math.min(n, len);
+      ((ByteBuffer) compressedDirectBuf).get(b, off, n);
+      bytesWritten += n;
+      return n;
+    }
+
+    // Re-initialize the snappy's output direct-buffer
+    compressedDirectBuf.clear();
+    compressedDirectBuf.limit(0);
+    if (0 == uncompressedDirectBuf.position()) {
+      // No compressed data, so we should have !needsInput or !finished
+      setInputFromSavedData();
+      if (0 == uncompressedDirectBuf.position()) {
+        // Called without data; write nothing
+        finished = true;
+        return 0;
+      }
+    }
+
+    // Compress data
+    n = compressBytesDirect();
+    compressedDirectBuf.limit(n);
+    uncompressedDirectBuf.clear(); // snappy consumes all buffer input
+
+    // Set 'finished' if snappy has consumed all user-data
+    if (0 == userBufLen) {
+      finished = true;
+    }
+
+    // Get at most 'len' bytes
+    n = Math.min(n, len);
+    bytesWritten += n;
+    ((ByteBuffer) compressedDirectBuf).get(b, off, n);
+
+    return n;
+  }
+
+  /**
+   * Resets compressor so that a new set of input data can be processed.
+   */
+  @Override
+  public synchronized void reset() {
+    finish = false;
+    finished = false;
+    uncompressedDirectBuf.clear();
+    uncompressedDirectBufLen = 0;
+    compressedDirectBuf.clear();
+    compressedDirectBuf.limit(0);
+    userBufOff = userBufLen = 0;
+    bytesRead = bytesWritten = 0L;
+  }
+
+  /**
+   * Prepare the compressor to be used in a new stream with settings defined in
+   * the given Configuration
+   *
+   * @param conf Configuration from which new setting are fetched
+   */
+  @Override
+  public synchronized void reinit(Configuration conf) {
+    reset();
+  }
+
+  /**
+   * Return number of bytes given to this compressor since last reset.
+   */
+  @Override
+  public synchronized long getBytesRead() {
+    return bytesRead;
+  }
+
+  /**
+   * Return number of bytes consumed by callers of compress since last reset.
+   */
+  @Override
+  public synchronized long getBytesWritten() {
+    return bytesWritten;
+  }
+
+  /**
+   * Closes the compressor and discards any unprocessed input.
+   */
+  @Override
+  public synchronized void end() {
+  }
+
+  private native static void initIDs();
+
+  private native int compressBytesDirect();
+}
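
For the raw Compressor handshake that BlockCompressorStream drives, a minimal sketch, assuming the native library is loaded and the payload fits in one direct buffer (64 KB by default):

import java.io.IOException;
import java.util.Arrays;

import org.apache.hadoop.io.compress.snappy.SnappyCompressor;

public class RawSnappyCompress {
  public static byte[] compress(byte[] data) throws IOException {
    SnappyCompressor compressor = new SnappyCompressor();
    compressor.setInput(data, 0, data.length);
    compressor.finish();                            // no further input will arrive

    // Same worst-case overhead bound the codec uses: size/6 + 32.
    byte[] out = new byte[data.length + (data.length / 6) + 32];
    int total = 0;
    while (!compressor.finished()) {                // drain the compressed output
      total += compressor.compress(out, total, out.length - total);
    }
    return Arrays.copyOf(out, total);
  }
}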

+ 280 - 0
common/src/java/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.java

@@ -0,0 +1,280 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.io.compress.snappy;
+
+import java.io.IOException;
+import java.nio.Buffer;
+import java.nio.ByteBuffer;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.compress.Decompressor;
+
+/**
+ * A {@link Decompressor} based on the snappy compression algorithm.
+ * http://code.google.com/p/snappy/
+ */
+public class SnappyDecompressor implements Decompressor {
+  private static final Log LOG =
+      LogFactory.getLog(SnappyCompressor.class.getName());
+  private static final int DEFAULT_DIRECT_BUFFER_SIZE = 64 * 1024;
+
+  // HACK - Use this as a global lock in the JNI layer
+  @SuppressWarnings({"unchecked", "unused"})
+  private static Class clazz = SnappyDecompressor.class;
+
+  private int directBufferSize;
+  private Buffer compressedDirectBuf = null;
+  private int compressedDirectBufLen;
+  private Buffer uncompressedDirectBuf = null;
+  private byte[] userBuf = null;
+  private int userBufOff = 0, userBufLen = 0;
+  private boolean finished;
+
+  static {
+    if (LoadSnappy.isLoaded()) {
+      // Initialize the native library
+      try {
+        initIDs();
+      } catch (Throwable t) {
+        // Ignore failure to load/initialize snappy
+        LOG.warn(t.toString());
+      }
+    } else {
+      LOG.error("Cannot load " + SnappyDecompressor.class.getName() +
+          " without snappy library!");
+    }
+  }
+
+  /**
+   * Creates a new decompressor.
+   *
+   * @param directBufferSize size of the direct buffer to be used.
+   */
+  public SnappyDecompressor(int directBufferSize) {
+    this.directBufferSize = directBufferSize;
+
+    compressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    uncompressedDirectBuf = ByteBuffer.allocateDirect(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+
+  }
+
+  /**
+   * Creates a new decompressor with the default buffer size.
+   */
+  public SnappyDecompressor() {
+    this(DEFAULT_DIRECT_BUFFER_SIZE);
+  }
+
+  /**
+   * Sets input data for decompression.
+   * This should be called if and only if {@link #needsInput()} returns
+   * <code>true</code> indicating that more input data is required.
+   * (Both native and non-native versions of various Decompressors require
+   * that the data passed in via <code>b[]</code> remain unmodified until
+   * the caller is explicitly notified--via {@link #needsInput()}--that the
+   * buffer may be safely modified.  With this requirement, an extra
+   * buffer-copy can be avoided.)
+   *
+   * @param b   Input data
+   * @param off Start offset
+   * @param len Length
+   */
+  @Override
+  public synchronized void setInput(byte[] b, int off, int len) {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    this.userBuf = b;
+    this.userBufOff = off;
+    this.userBufLen = len;
+
+    setInputFromSavedData();
+
+    // Reinitialize snappy's output direct-buffer
+    uncompressedDirectBuf.limit(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+  }
+
+  /**
+   * If a write would exceed the capacity of the direct buffers, it is set
+   * aside to be loaded by this function while the compressed data are
+   * consumed.
+   */
+  synchronized void setInputFromSavedData() {
+    compressedDirectBufLen = Math.min(userBufLen, directBufferSize);
+
+    // Reinitialize snappy's input direct buffer
+    compressedDirectBuf.rewind();
+    ((ByteBuffer) compressedDirectBuf).put(userBuf, userBufOff,
+        compressedDirectBufLen);
+
+    // Note how much data is being fed to snappy
+    userBufOff += compressedDirectBufLen;
+    userBufLen -= compressedDirectBufLen;
+  }
+
+  /**
+   * Does nothing.
+   */
+  @Override
+  public synchronized void setDictionary(byte[] b, int off, int len) {
+    // do nothing
+  }
+
+  /**
+   * Returns true if the input data buffer is empty and
+   * {@link #setInput(byte[], int, int)} should be called to
+   * provide more input.
+   *
+   * @return <code>true</code> if the input data buffer is empty and
+   *         {@link #setInput(byte[], int, int)} should be called in
+   *         order to provide more input.
+   */
+  @Override
+  public synchronized boolean needsInput() {
+    // Consume remaining compressed data?
+    if (uncompressedDirectBuf.remaining() > 0) {
+      return false;
+    }
+
+    // Check if snappy has consumed all input
+    if (compressedDirectBufLen <= 0) {
+      // Check if we have consumed all user-input
+      if (userBufLen <= 0) {
+        return true;
+      } else {
+        setInputFromSavedData();
+      }
+    }
+
+    return false;
+  }
+
+  /**
+   * Returns <code>false</code>.
+   *
+   * @return <code>false</code>.
+   */
+  @Override
+  public synchronized boolean needsDictionary() {
+    return false;
+  }
+
+  /**
+   * Returns true if the end of the decompressed
+   * data output stream has been reached.
+   *
+   * @return <code>true</code> if the end of the decompressed
+   *         data output stream has been reached.
+   */
+  @Override
+  public synchronized boolean finished() {
+    return (finished && uncompressedDirectBuf.remaining() == 0);
+  }
+
+  /**
+   * Fills specified buffer with uncompressed data. Returns actual number
+   * of bytes of uncompressed data. A return value of 0 indicates that
+   * {@link #needsInput()} should be called in order to determine if more
+   * input data is required.
+   *
+   * @param b   Buffer for the uncompressed data
+   * @param off Start offset of the data
+   * @param len Size of the buffer
+   * @return The actual number of bytes of uncompressed data.
+   * @throws IOException
+   */
+  @Override
+  public synchronized int decompress(byte[] b, int off, int len)
+      throws IOException {
+    if (b == null) {
+      throw new NullPointerException();
+    }
+    if (off < 0 || len < 0 || off > b.length - len) {
+      throw new ArrayIndexOutOfBoundsException();
+    }
+
+    int n = 0;
+
+    // Check if there is uncompressed data
+    n = uncompressedDirectBuf.remaining();
+    if (n > 0) {
+      n = Math.min(n, len);
+      ((ByteBuffer) uncompressedDirectBuf).get(b, off, n);
+      return n;
+    }
+    if (compressedDirectBufLen > 0) {
+      // Re-initialize the snappy's output direct buffer
+      uncompressedDirectBuf.rewind();
+      uncompressedDirectBuf.limit(directBufferSize);
+
+      // Decompress data
+      n = decompressBytesDirect();
+      uncompressedDirectBuf.limit(n);
+
+      if (userBufLen <= 0) {
+        finished = true;
+      }
+
+      // Get at most 'len' bytes
+      n = Math.min(n, len);
+      ((ByteBuffer) uncompressedDirectBuf).get(b, off, n);
+    }
+
+    return n;
+  }
+
+  /**
+   * Returns <code>0</code>.
+   *
+   * @return <code>0</code>.
+   */
+  @Override
+  public synchronized int getRemaining() {
+    // Never use this function in BlockDecompressorStream.
+    return 0;
+  }
+
+  public synchronized void reset() {
+    finished = false;
+    compressedDirectBufLen = 0;
+    uncompressedDirectBuf.limit(directBufferSize);
+    uncompressedDirectBuf.position(directBufferSize);
+    userBufOff = userBufLen = 0;
+  }
+
+  /**
+   * Closes the decompressor and discards any unprocessed input.
+   */
+  @Override
+  public synchronized void end() {
+    // do nothing
+  }
+
+  private native static void initIDs();
+
+  private native int decompressBytesDirect();
+}
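
And the matching Decompressor side, under the same assumptions (native library loaded, input and output each fit in one direct buffer):

import java.io.IOException;

import org.apache.hadoop.io.compress.snappy.SnappyDecompressor;

public class RawSnappyDecompress {
  public static int decompress(byte[] compressed, byte[] dst) throws IOException {
    SnappyDecompressor decompressor = new SnappyDecompressor();
    decompressor.setInput(compressed, 0, compressed.length);

    int total = 0;
    while (!decompressor.finished() && total < dst.length) {
      int n = decompressor.decompress(dst, total, dst.length - total);
      if (n == 0 && decompressor.needsInput()) {
        break;                                      // nothing more to feed
      }
      total += n;
    }
    return total;                                   // uncompressed bytes written to dst
  }
}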

+ 2 - 2
common/src/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -61,7 +61,7 @@ class RetryInvocationHandler implements InvocationHandler {
         if (!policy.shouldRetry(e, retries++)) {
           LOG.info("Exception while invoking " + method.getName()
                    + " of " + implementation.getClass() + ". Not retrying."
-                   + StringUtils.stringifyException(e));
+                   , e);
           if (!method.getReturnType().equals(Void.TYPE)) {
             throw e; // non-void methods can't fail without an exception
           }
@@ -70,7 +70,7 @@ class RetryInvocationHandler implements InvocationHandler {
         if(LOG.isDebugEnabled()) {
           LOG.debug("Exception while invoking " + method.getName()
               + " of " + implementation.getClass() + ". Retrying."
-              + StringUtils.stringifyException(e));
+              , e);
         }
       }
     }

+ 1 - 2
common/src/java/org/apache/hadoop/io/serializer/SerializationFactory.java

@@ -71,8 +71,7 @@ public class SerializationFactory extends Configured {
       serializations.add((Serialization)
 	  ReflectionUtils.newInstance(serializionClass, getConf()));
     } catch (ClassNotFoundException e) {
-      LOG.warn("Serialization class not found: " +
-          StringUtils.stringifyException(e));
+      LOG.warn("Serialization class not found: ", e);
     }
   }
 
 

+ 9 - 7
common/src/java/org/apache/hadoop/ipc/Client.java

@@ -19,10 +19,8 @@
 package org.apache.hadoop.ipc;
 
 
 import java.net.InetAddress;
-import java.net.NetworkInterface;
 import java.net.Socket;
 import java.net.InetSocketAddress;
-import java.net.SocketException;
 import java.net.SocketTimeoutException;
 import java.net.UnknownHostException;
 import java.net.ConnectException;
@@ -254,7 +252,7 @@ public class Client {
       Class<?> protocol = remoteId.getProtocol();
       this.useSasl = UserGroupInformation.isSecurityEnabled();
       if (useSasl && protocol != null) {
-        TokenInfo tokenInfo = protocol.getAnnotation(TokenInfo.class);
+        TokenInfo tokenInfo = SecurityUtil.getTokenInfo(protocol);
         if (tokenInfo != null) {
           TokenSelector<? extends TokenIdentifier> tokenSelector = null;
           try {
@@ -269,7 +267,7 @@ public class Client {
               .getHostAddress() + ":" + addr.getPort()), 
               ticket.getTokens());
         }
-        KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class);
+        KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol);
         if (krbInfo != null) {
           serverPrincipal = remoteId.getServerPrincipal();
           if (LOG.isDebugEnabled()) {
@@ -585,8 +583,12 @@ public class Client {
           start();
           return;
         }
-      } catch (IOException e) {
-        markClosed(e);
+      } catch (Throwable t) {
+        if (t instanceof IOException) {
+          markClosed((IOException)t);
+        } else {
+          markClosed(new IOException("Couldn't set up IO streams", t));
+        }
         close();
       }
     }
@@ -1283,7 +1285,7 @@ public class Client {
       if (!UserGroupInformation.isSecurityEnabled() || protocol == null) {
         return null;
       }
-      KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class);
+      KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol);
       if (krbInfo != null) {
         String serverKey = krbInfo.serverPrincipal();
         if (serverKey == null) {

+ 1 - 1
common/src/java/org/apache/hadoop/ipc/Server.java

@@ -1501,7 +1501,7 @@ public abstract class Server {
                   );
             }
           } catch (Throwable e) {
-            LOG.info(getName()+", call "+call+": error: " + e, e);
+            LOG.info(getName() + ", call: " + call + ", error: ", e);
             errorClass = e.getClass().getName();
             error = StringUtils.stringifyException(e);
             // Remove redundant error class name from the beginning of the stack trace

+ 100 - 37
common/src/java/org/apache/hadoop/jmx/JMXJsonServlet.java

@@ -67,8 +67,20 @@ import org.codehaus.jackson.JsonGenerator;
  * For example <code>http://.../jmx?qry=Hadoop:*</code> will return
  * all hadoop metrics exposed through JMX.
  * <p>
- * If the <code>qry</code> parameter is not formatted correctly then a
- * 400 BAD REQUEST http response code will be returned. 
+ * The optional <code>get</code> parameter is used to query a specific 
+ * attribute of a JMX bean.  The format of the URL is
+ * <code>http://.../jmx?get=MXBeanName::AttributeName</code>
+ * <p>
+ * For example 
+ * <code>
+ * http://../jmx?get=Hadoop:service=NameNode,name=NameNodeInfo::ClusterId
+ * </code> will return the cluster id of the namenode mxbean.
+ * <p>
+ * If the <code>qry</code> or the <code>get</code> parameter is not formatted 
+ * correctly then a 400 BAD REQUEST http response code will be returned. 
+ * <p>
+ * If a resource such as an MBean or attribute cannot be found, 
+ * a 404 SC_NOT_FOUND http response code will be returned. 
  * <p>
  * The return format is JSON and in the form
  * <p>
@@ -150,25 +162,49 @@ public class JMXJsonServlet extends HttpServlet {
         jg.writeStringField("result", "ERROR");
         jg.writeStringField("result", "ERROR");
         jg.writeStringField("message", "No MBeanServer could be found");
         jg.writeStringField("message", "No MBeanServer could be found");
         jg.close();
         jg.close();
+        LOG.error("No MBeanServer could be found.");
+        response.setStatus(HttpServletResponse.SC_NOT_FOUND);
         return;
         return;
       }
       }
+      
+      // query per mbean attribute
+      String getmethod = request.getParameter("get");
+      if (getmethod != null) {
+        String[] splitStrings = getmethod.split("\\:\\:");
+        if (splitStrings.length != 2) {
+          jg.writeStringField("result", "ERROR");
+          jg.writeStringField("message", "query format is not as expected.");
+          jg.close();
+          response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
+          return;
+        }
+        listBeans(jg, new ObjectName(splitStrings[0]), splitStrings[1],
+            response);
+        jg.close();
+        return;
+      }
+
+      // query per mbean
       String qry = request.getParameter("qry");
       if (qry == null) {
         qry = "*:*";
       }
-      listBeans(jg, new ObjectName(qry));
+      listBeans(jg, new ObjectName(qry), null, response);
       jg.close();
-    } catch (IOException e) {
+
+    } catch ( IOException e ) {
       LOG.error("Caught an exception while processing JMX request", e);
       LOG.error("Caught an exception while processing JMX request", e);
       response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
       response.setStatus(HttpServletResponse.SC_INTERNAL_SERVER_ERROR);
-    } catch (MalformedObjectNameException e) {
+    } catch ( MalformedObjectNameException e ) {
       LOG.error("Caught an exception while processing JMX request", e);
       LOG.error("Caught an exception while processing JMX request", e);
       response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
       response.setStatus(HttpServletResponse.SC_BAD_REQUEST);
     }
     }
   }
   }
 
 
   // --------------------------------------------------------- Private Methods
   // --------------------------------------------------------- Private Methods
-  private void listBeans(JsonGenerator jg, ObjectName qry) throws IOException {
+  private void listBeans(JsonGenerator jg, ObjectName qry, String attribute, 
+      HttpServletResponse response) 
+  throws IOException {
     LOG.debug("Listing beans for "+qry);
     LOG.debug("Listing beans for "+qry);
     Set<ObjectName> names = null;
     Set<ObjectName> names = null;
     names = mBeanServer.queryNames(qry, null);
     names = mBeanServer.queryNames(qry, null);
@@ -178,62 +214,89 @@ public class JMXJsonServlet extends HttpServlet {
     while (it.hasNext()) {
       ObjectName oname = it.next();
       MBeanInfo minfo;
-      String code;
+      String code = "";
+      Object attributeinfo = null;
       try {
         minfo = mBeanServer.getMBeanInfo(oname);
         code = minfo.getClassName();
+        String prs = "";
         try {
           if ("org.apache.commons.modeler.BaseModelMBean".equals(code)) {
-            code = (String) mBeanServer.getAttribute(oname, "modelerType");
+            prs = "modelerType";
+            code = (String) mBeanServer.getAttribute(oname, prs);
+          }
+          if (attribute!=null) {
+            prs = attribute;
+            attributeinfo = mBeanServer.getAttribute(oname, prs);
           }
         } catch (AttributeNotFoundException e) {
-          //Ignored the modelerType attribute was not found, so use the class name instead.
+          // If the modelerType attribute was not found, the class name is used
+          // instead.
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
         } catch (MBeanException e) {
-          //The code inside the attribute getter threw an exception so log it, and
-          // fall back on the class name
-          LOG.error("getting attribute modelerType of "+oname+" threw an exception", e);
+          // The code inside the attribute getter threw an exception so log it,
+          // and fall back on the class name
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
         } catch (RuntimeException e) {
-          //For some reason even with an MBeanException available to them Runtime exceptions
-          //can still find their way through, so treat them the same as MBeanException
-          LOG.error("getting attribute modelerType of "+oname+" threw an exception", e);
-        } catch (ReflectionException e) {
-          //This happens when the code inside the JMX bean (setter?? from the java docs)
-          //threw an exception, so log it and fall back on the class name
-          LOG.error("getting attribute modelerType of "+oname+" threw an exception", e);
+          // For some reason even with an MBeanException available to them
+          // Runtime exceptions can still find their way through, so treat them
+          // the same as MBeanException
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
+        } catch ( ReflectionException e ) {
+          // This happens when the code inside the JMX bean (setter?? from the
+          // java docs) threw an exception, so log it and fall back on the 
+          // class name
+          LOG.error("getting attribute " + prs + " of " + oname
+              + " threw an exception", e);
         }
       } catch (InstanceNotFoundException e) {
         //Ignored for some reason the bean was not found so don't output it
         continue;
-      } catch (IntrospectionException e) {
-        //This is an internal error, something odd happened with reflection so log it and
-        //don't output the bean.
-        LOG.error("Problem while trying to process JMX query: "+qry+" with MBean "+oname, e); 
+      } catch ( IntrospectionException e ) {
+        // This is an internal error, something odd happened with reflection so
+        // log it and don't output the bean.
+        LOG.error("Problem while trying to process JMX query: " + qry
+            + " with MBean " + oname, e);
         continue;
-      } catch (ReflectionException e) {
-        //This happens when the code inside the JMX bean threw an exception, so log it and
-        //don't output the bean.
-        LOG.error("Problem while trying to process JMX query: "+qry+" with MBean "+oname, e);
+      } catch ( ReflectionException e ) {
+        // This happens when the code inside the JMX bean threw an exception, so
+        // log it and don't output the bean.
+        LOG.error("Problem while trying to process JMX query: " + qry
+            + " with MBean " + oname, e);
         continue;
       }
 
       jg.writeStartObject();
       jg.writeStringField("name", oname.toString());
-      // can't be null - I think
-
+      
       jg.writeStringField("modelerType", code);
       jg.writeStringField("modelerType", code);
-
-      MBeanAttributeInfo attrs[] = minfo.getAttributes();
-      for (int i = 0; i < attrs.length; i++) {
-        writeAttribute(jg, oname, attrs[i]);
+      if ((attribute != null) && (attributeinfo == null)) {
+        jg.writeStringField("result", "ERROR");
+        jg.writeStringField("message", "No attribute with name " + attribute
+            + " was found.");
+        jg.writeEndObject();
+        jg.writeEndArray();
+        jg.close();
+        response.setStatus(HttpServletResponse.SC_NOT_FOUND);
+        return;
+      }
+      
+      if (attribute != null) {
+        writeAttribute(jg, attribute, attributeinfo);
+      } else {
+        MBeanAttributeInfo attrs[] = minfo.getAttributes();
+        for (int i = 0; i < attrs.length; i++) {
+          writeAttribute(jg, oname, attrs[i]);
+        }
       }
-      //  LOG.error("Caught Error writing value ",t);
-      //  ExceptionUtils.handleThrowable(t);
-      //}
       jg.writeEndObject();
     }
     jg.writeEndArray();
   }
-  
+
   private void writeAttribute(JsonGenerator jg, ObjectName oname, MBeanAttributeInfo attr) throws IOException {
     if (!attr.isReadable()) {
       return;
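
A hedged client-side sketch of the new get parameter, reusing the bean name from the javadoc above; host and port are placeholders for whatever daemon embeds the servlet. A malformed query yields 400 and a missing bean or attribute yields 404, per the changes above:

import java.io.BufferedReader;
import java.io.InputStreamReader;
import java.net.URL;

public class JmxGetClient {
  public static void main(String[] args) throws Exception {
    // Placeholder host/port; any Hadoop daemon embedding HttpServer serves /jmx.
    URL url = new URL("http://localhost:50070/jmx?get="
        + "Hadoop:service=NameNode,name=NameNodeInfo::ClusterId");
    BufferedReader in =
        new BufferedReader(new InputStreamReader(url.openStream(), "UTF-8"));
    String line;
    while ((line = in.readLine()) != null) {
      System.out.println(line);   // JSON holding "name", "modelerType" and the attribute
    }
    in.close();
  }
}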

+ 1 - 2
common/src/java/org/apache/hadoop/metrics/util/MetricsIntValue.java

@@ -97,8 +97,7 @@ public class MetricsIntValue extends MetricsBase {
       try {
         mr.setMetric(getName(), value);
       } catch (Exception e) {
-        LOG.info("pushMetric failed for " + getName() + "\n" +
-            StringUtils.stringifyException(e));
+        LOG.info("pushMetric failed for " + getName() + "\n", e);
       }
     }
     changed = false;

+ 1 - 2
common/src/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingInt.java

@@ -106,8 +106,7 @@ public class MetricsTimeVaryingInt extends MetricsBase {
     try {
       mr.incrMetric(getName(), getPreviousIntervalValue());
     } catch (Exception e) {
-      LOG.info("pushMetric failed for " + getName() + "\n" +
-          StringUtils.stringifyException(e));
+      LOG.info("pushMetric failed for " + getName() + "\n" , e);
     }
   }
   
   

+ 1 - 2
common/src/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingLong.java

@@ -102,8 +102,7 @@ public class MetricsTimeVaryingLong extends MetricsBase{
     try {
       mr.incrMetric(getName(), getPreviousIntervalValue());
     } catch (Exception e) {
-      LOG.info("pushMetric failed for " + getName() + "\n" +
-          StringUtils.stringifyException(e));
+      LOG.info("pushMetric failed for " + getName() + "\n" , e);
     }
   }
   
   

+ 1 - 2
common/src/java/org/apache/hadoop/metrics/util/MetricsTimeVaryingRate.java

@@ -150,8 +150,7 @@ public class MetricsTimeVaryingRate extends MetricsBase {
       mr.incrMetric(getName() + "_num_ops", getPreviousIntervalNumOps());
       mr.setMetric(getName() + "_avg_time", getPreviousIntervalAverageTime());
     } catch (Exception e) {
-      LOG.info("pushMetric failed for " + getName() + "\n" +
-          StringUtils.stringifyException(e));
+      LOG.info("pushMetric failed for " + getName() + "\n" , e);
     }
   }
   
   

+ 1 - 1
common/src/java/org/apache/hadoop/net/ScriptBasedMapping.java

@@ -156,7 +156,7 @@ implements Configurable
         s.execute();
         allOutput.append(s.getOutput() + " ");
       } catch (Exception e) {
-        LOG.warn(StringUtils.stringifyException(e));
+        LOG.warn("Exception: ", e);
         return null;
       }
       loopCount++; 

+ 2 - 4
common/src/java/org/apache/hadoop/net/SocketIOWithTimeout.java

@@ -288,8 +288,7 @@ abstract class SocketIOWithTimeout {
           try {
             selector.close();
           } catch (IOException e) {
-            LOG.warn("Unexpected exception while closing selector : " +
-                     StringUtils.stringifyException(e));
+            LOG.warn("Unexpected exception while closing selector : ", e);
           }
         }
       }    
@@ -361,8 +360,7 @@ abstract class SocketIOWithTimeout {
         try {
           info.selector.selectNow();
         } catch (IOException e) {
-          LOG.info("Unexpected Exception while clearing selector : " +
-                   StringUtils.stringifyException(e));
+          LOG.info("Unexpected Exception while clearing selector : ", e);
           // don't put the selector back.
           info.close();
           return ret; 

+ 39 - 0
common/src/java/org/apache/hadoop/security/AnnotatedSecurityInfo.java

@@ -0,0 +1,39 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import org.apache.hadoop.security.token.TokenInfo;
+
+/**
+ * Constructs SecurityInfo from Annotations provided in protocol interface.
+ */
+public class AnnotatedSecurityInfo extends SecurityInfo {
+
+  @Override
+  public KerberosInfo getKerberosInfo(Class<?> protocol) {
+    return protocol.getAnnotation(KerberosInfo.class);
+  }
+
+  @Override
+  public TokenInfo getTokenInfo(Class<?> protocol) {
+    return protocol.getAnnotation(TokenInfo.class);
+  }
+
+  
+}

+ 43 - 0
common/src/java/org/apache/hadoop/security/SecurityInfo.java

@@ -0,0 +1,43 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.security;
+
+import org.apache.hadoop.security.token.TokenInfo;
+
+/**
+ * Interface used by RPC to get the Security information for a given 
+ * protocol.
+ */
+public abstract class SecurityInfo {
+
+  /**
+   * Get the KerberosInfo for a given protocol.
+   * @param protocol interface class
+   * @return KerberosInfo
+   */
+  public abstract KerberosInfo getKerberosInfo(Class<?> protocol);
+
+  /**
+   * Get the TokenInfo for a given protocol.
+   * @param protocol interface class
+   * @return TokenInfo instance
+   */
+  public abstract TokenInfo getTokenInfo(Class<?> protocol);
+
+}

+ 60 - 1
common/src/java/org/apache/hadoop/security/SecurityUtil.java

@@ -22,6 +22,7 @@ import java.net.URI;
 import java.net.URL;
 import java.net.UnknownHostException;
 import java.security.AccessController;
+import java.util.ServiceLoader;
 import java.util.Set;
 
 import javax.security.auth.Subject;
@@ -33,8 +34,8 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.token.TokenInfo;
 
 import sun.security.jgss.krb5.Krb5Util;
 import sun.security.krb5.Credentials;
@@ -291,4 +292,62 @@ public class SecurityUtil {
   public static String getHostFromPrincipal(String principalName) {
     return new KerberosName(principalName).getHostName();
   }
+
+  private static ServiceLoader<SecurityInfo> securityInfoProviders = 
+    ServiceLoader.load(SecurityInfo.class);
+  private static SecurityInfo[] testProviders = new SecurityInfo[0];
+
+  /**
+   * Test setup method to register additional providers.
+   * @param providers a list of high priority providers to use
+   */
+  @InterfaceAudience.Private
+  public static void setSecurityInfoProviders(SecurityInfo... providers) {
+    testProviders = providers;
+  }
+  
+  /**
+   * Look up the KerberosInfo for a given protocol. It searches all known
+   * SecurityInfo providers.
+   * @param protocol the protocol class to get the information for
+   * @return the KerberosInfo or null if it has no KerberosInfo defined
+   */
+  public static KerberosInfo getKerberosInfo(Class<?> protocol) {
+    for(SecurityInfo provider: testProviders) {
+      KerberosInfo result = provider.getKerberosInfo(protocol);
+      if (result != null) {
+        return result;
+      }
+    }
+    for(SecurityInfo provider: securityInfoProviders) {
+      KerberosInfo result = provider.getKerberosInfo(protocol);
+      if (result != null) {
+        return result;
+      }
+    }
+    return null;
+  }
+ 
+  /**
+   * Look up the TokenInfo for a given protocol. It searches all known
+   * SecurityInfo providers.
+   * @param protocol The protocol class to get the information for.
+   * @return the TokenInfo or null if it has no TokenInfo defined
+   */
+  public static TokenInfo getTokenInfo(Class<?> protocol) {
+    for(SecurityInfo provider: testProviders) {
+      TokenInfo result = provider.getTokenInfo(protocol);
+      if (result != null) {
+        return result;
+      }      
+    }
+    for(SecurityInfo provider: securityInfoProviders) {
+      TokenInfo result = provider.getTokenInfo(protocol);
+      if (result != null) {
+        return result;
+      }
+    } 
+    return null;
+  }
+
 }
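
A hedged sketch of a third-party provider feeding this lookup; the class and package are hypothetical, and it only takes effect if a META-INF/services/org.apache.hadoop.security.SecurityInfo file on the classpath names it. Tests can instead register it directly with SecurityUtil.setSecurityInfoProviders(new ExampleSecurityInfo()):

package org.example.security;                    // hypothetical package

import org.apache.hadoop.security.KerberosInfo;
import org.apache.hadoop.security.SecurityInfo;
import org.apache.hadoop.security.token.TokenInfo;

public class ExampleSecurityInfo extends SecurityInfo {

  @Override
  public KerberosInfo getKerberosInfo(Class<?> protocol) {
    // Answer only for protocols this provider understands; returning null lets
    // SecurityUtil fall through to the remaining registered providers.
    return protocol.getAnnotation(KerberosInfo.class);
  }

  @Override
  public TokenInfo getTokenInfo(Class<?> protocol) {
    return protocol.getAnnotation(TokenInfo.class);
  }
}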

+ 9 - 5
common/src/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -878,17 +878,21 @@ public class UserGroupInformation {
   private static class TestingGroups extends Groups {
     private final Map<String, List<String>> userToGroupsMapping = 
       new HashMap<String,List<String>>();
+    private Groups underlyingImplementation;
     
     
-    private TestingGroups() {
+    private TestingGroups(Groups underlyingImplementation) {
       super(new org.apache.hadoop.conf.Configuration());
       super(new org.apache.hadoop.conf.Configuration());
+      this.underlyingImplementation = underlyingImplementation;
     }
     }
     
     
     @Override
     @Override
-    public List<String> getGroups(String user) {
+    public List<String> getGroups(String user) throws IOException {
       List<String> result = userToGroupsMapping.get(user);
       List<String> result = userToGroupsMapping.get(user);
+      
       if (result == null) {
       if (result == null) {
-        result = new ArrayList<String>();
+        result = underlyingImplementation.getGroups(user);
       }
+
       return result;
     }
 
@@ -910,7 +914,7 @@ public class UserGroupInformation {
     UserGroupInformation ugi = createRemoteUser(user);
     // make sure that the testing object is setup
     if (!(groups instanceof TestingGroups)) {
-      groups = new TestingGroups();
+      groups = new TestingGroups(groups);
     }
     // add the user groups
     ((TestingGroups) groups).setUserGroups(ugi.getShortUserName(), userGroups);
@@ -936,7 +940,7 @@ public class UserGroupInformation {
     UserGroupInformation ugi = createProxyUser(user, realUser);
     // make sure that the testing object is setup
     if (!(groups instanceof TestingGroups)) {
-      groups = new TestingGroups();
+      groups = new TestingGroups(groups);
     }
     // add the user groups
     ((TestingGroups) groups).setUserGroups(ugi.getShortUserName(), userGroups);
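A short sketch (user and group names hypothetical) of what the change above buys: only the explicitly stubbed user gets the fake mapping, while lookups for any other user now delegate to the wrapped real Groups implementation instead of silently returning an empty list.

  UserGroupInformation alice = UserGroupInformation.createUserForTesting(
      "alice", new String[] { "testgroup" });
  String[] stubbed = alice.getGroupNames();   // ["testgroup"], from the stub

  // Other users fall through to the underlying Groups implementation:
  UserGroupInformation bob = UserGroupInformation.createRemoteUser("bob");
  String[] real = bob.getGroupNames();        // whatever the system reports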

+ 1 - 3
common/src/java/org/apache/hadoop/security/authorize/ServiceAuthorizationManager.java

@@ -41,8 +41,6 @@ import org.apache.hadoop.security.UserGroupInformation;
 @InterfaceStability.Evolving
 public class ServiceAuthorizationManager {
   private static final String HADOOP_POLICY_FILE = "hadoop-policy.xml";
-  private static final Log LOG = LogFactory
-  .getLog(ServiceAuthorizationManager.class);
 
   private Map<Class<?>, AccessControlList> protocolToAcl =
     new IdentityHashMap<Class<?>, AccessControlList>();
@@ -86,7 +84,7 @@ public class ServiceAuthorizationManager {
     }
     
     // get client principal key to verify (if available)
-    KerberosInfo krbInfo = protocol.getAnnotation(KerberosInfo.class);
+    KerberosInfo krbInfo = SecurityUtil.getKerberosInfo(protocol);
     String clientPrincipal = null; 
     if (krbInfo != null) {
       String clientKey = krbInfo.clientPrincipal();

+ 1 - 2
common/src/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -373,8 +373,7 @@ extends AbstractDelegationTokenIdentifier>
               rollMasterKey();
               lastMasterKeyUpdate = now;
             } catch (IOException e) {
-              LOG.error("Master key updating failed. "
-                  + StringUtils.stringifyException(e));
+              LOG.error("Master key updating failed: ", e);
             }
           }
           if (lastTokenCacheCleanup + tokenRemoverScanInterval < now) {

+ 66 - 0
common/src/java/org/apache/hadoop/util/ProtoUtil.java

@@ -0,0 +1,66 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.util;
+
+import java.io.DataInput;
+import java.io.IOException;
+
+public abstract class ProtoUtil {
+
+  /**
+   * Read a variable length integer in the same format that ProtoBufs encodes.
+   * @param in the input stream to read from
+   * @return the integer
+   * @throws IOException if it is malformed or EOF.
+   */
+  public static int readRawVarint32(DataInput in) throws IOException {
+    byte tmp = in.readByte();
+    if (tmp >= 0) {
+      return tmp;
+    }
+    int result = tmp & 0x7f;
+    if ((tmp = in.readByte()) >= 0) {
+      result |= tmp << 7;
+    } else {
+      result |= (tmp & 0x7f) << 7;
+      if ((tmp = in.readByte()) >= 0) {
+        result |= tmp << 14;
+      } else {
+        result |= (tmp & 0x7f) << 14;
+        if ((tmp = in.readByte()) >= 0) {
+          result |= tmp << 21;
+        } else {
+          result |= (tmp & 0x7f) << 21;
+          result |= (tmp = in.readByte()) << 28;
+          if (tmp < 0) {
+            // Discard upper 32 bits.
+            for (int i = 0; i < 5; i++) {
+              if (in.readByte() >= 0) {
+                return result;
+              }
+            }
+            throw new IOException("Malformed varint");
+          }
+        }
+      }
+    }
+    return result;
+  }
+
+}
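A small worked example of the varint wire format decoded above (the byte values follow the standard protobuf encoding; the stream wiring is only a sketch): 300 is written low seven bits first, with the high bit of each byte marking continuation, so it encodes to 0xAC 0x02.

  import java.io.ByteArrayInputStream;
  import java.io.DataInput;
  import java.io.DataInputStream;

  byte[] encoded = { (byte) 0xAC, 0x02 };      // varint encoding of 300
  DataInput in = new DataInputStream(new ByteArrayInputStream(encoded));
  int value = ProtoUtil.readRawVarint32(in);   // 0x2C | (0x02 << 7) == 300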

+ 3 - 0
common/src/native/Makefile.am

@@ -34,6 +34,7 @@ export PLATFORM = $(shell echo $$OS_NAME | tr [A-Z] [a-z])
 ACLOCAL_AMFLAGS = -I m4 
 AM_CPPFLAGS = @JNI_CPPFLAGS@ -I$(HADOOP_NATIVE_SRCDIR)/src \
               -Isrc/org/apache/hadoop/io/compress/zlib \
+              -Isrc/org/apache/hadoop/io/compress/snappy \
               -Isrc/org/apache/hadoop/security \
               -Isrc/org/apache/hadoop/io/nativeio/
 AM_LDFLAGS = @JNI_LDFLAGS@
@@ -46,6 +47,8 @@ endif
 lib_LTLIBRARIES = libhadoop.la
 libhadoop_la_SOURCES = src/org/apache/hadoop/io/compress/zlib/ZlibCompressor.c \
                        src/org/apache/hadoop/io/compress/zlib/ZlibDecompressor.c \
+                       src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c \
+                       src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c \
                        src/org/apache/hadoop/security/getGroup.c \
                        src/org/apache/hadoop/security/JniBasedUnixGroupsMapping.c \
                        src/org/apache/hadoop/security/JniBasedUnixGroupsNetgroupMapping.c \

+ 3 - 0
common/src/native/configure.ac

@@ -88,6 +88,9 @@ AC_SUBST([JNI_CPPFLAGS])
 dnl Check for zlib headers
 AC_CHECK_HEADERS([zlib.h zconf.h], AC_COMPUTE_NEEDED_DSO(z,HADOOP_ZLIB_LIBRARY), AC_MSG_ERROR(Zlib headers were not found... native-hadoop library needs zlib to build. Please install the requisite zlib development package.))
 
+dnl Check for snappy headers
+AC_CHECK_HEADERS([snappy-c.h], AC_COMPUTE_NEEDED_DSO(snappy,HADOOP_SNAPPY_LIBRARY), AC_MSG_WARN(Snappy headers were not found... building without snappy.))
+
 dnl Check for headers needed by the native Group resolution implementation
 AC_CHECK_HEADERS([fcntl.h stdlib.h string.h unistd.h], [], AC_MSG_ERROR(Some system headers not found... please ensure their presence on your platform.))
 

+ 13 - 0
common/src/native/packageNativeHadoop.sh

@@ -62,4 +62,17 @@ then
   done  
 fi
 
+if [ "${BUNDLE_SNAPPY_LIB}" = "true" ]
+then
+ if [ -d ${SNAPPY_LIB_DIR} ]
+ then
+   echo "Copying Snappy library in ${SNAPPY_LIB_DIR} to $DIST_LIB_DIR/"
+   cd ${SNAPPY_LIB_DIR}
+   $TAR . | (cd $DIST_LIB_DIR/; $UNTAR)
+ else
+   echo "Snappy lib directory ${SNAPPY_LIB_DIR} does not exist"
+   exit 1
+ fi
+fi
+
 #vim: ts=2: sw=2: et

+ 127 - 0
common/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c

@@ -0,0 +1,127 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined HAVE_CONFIG_H
+  #include <config.h>
+#endif
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
+#if defined HAVE_STDIO_H
+  #include <stdio.h>
+#else
+  #error 'stdio.h not found'
+#endif
+
+#if defined HAVE_STDLIB_H
+  #include <stdlib.h>
+#else
+  #error 'stdlib.h not found'
+#endif
+
+#if defined HAVE_STRING_H
+  #include <string.h>
+#else
+  #error 'string.h not found'
+#endif
+
+#if defined HAVE_DLFCN_H
+  #include <dlfcn.h>
+#else
+  #error 'dlfcn.h not found'
+#endif
+
+#include "org_apache_hadoop_io_compress_snappy.h"
+#include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
+
+static jfieldID SnappyCompressor_clazz;
+static jfieldID SnappyCompressor_uncompressedDirectBuf;
+static jfieldID SnappyCompressor_uncompressedDirectBufLen;
+static jfieldID SnappyCompressor_compressedDirectBuf;
+static jfieldID SnappyCompressor_directBufferSize;
+
+static snappy_status (*dlsym_snappy_compress)(const char*, size_t, char*, size_t*);
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_initIDs
+(JNIEnv *env, jclass clazz){
+
+  // Load libsnappy.so
+  void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+  if (!libsnappy) {
+    char* msg = (char*)malloc(1000);
+    snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror());
+    THROW(env, "java/lang/UnsatisfiedLinkError", msg);
+    return;
+  }
+
+  // Locate the requisite symbols from libsnappy.so
+  dlerror();                                 // Clear any existing error
+  LOAD_DYNAMIC_SYMBOL(dlsym_snappy_compress, env, libsnappy, "snappy_compress");
+
+  SnappyCompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
+                                                 "Ljava/lang/Class;");
+  SnappyCompressor_uncompressedDirectBuf = (*env)->GetFieldID(env, clazz,
+                                                           "uncompressedDirectBuf",
+                                                           "Ljava/nio/Buffer;");
+  SnappyCompressor_uncompressedDirectBufLen = (*env)->GetFieldID(env, clazz,
+                                                              "uncompressedDirectBufLen", "I");
+  SnappyCompressor_compressedDirectBuf = (*env)->GetFieldID(env, clazz,
+                                                         "compressedDirectBuf",
+                                                         "Ljava/nio/Buffer;");
+  SnappyCompressor_directBufferSize = (*env)->GetFieldID(env, clazz,
+                                                       "directBufferSize", "I");
+}
+
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_compressBytesDirect
+(JNIEnv *env, jobject thisj){
+  // Get members of SnappyCompressor
+  jobject clazz = (*env)->GetStaticObjectField(env, thisj, SnappyCompressor_clazz);
+  jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_uncompressedDirectBuf);
+  jint uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen);
+  jobject compressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_compressedDirectBuf);
+  jint compressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyCompressor_directBufferSize);
+
+  // Get the input direct buffer
+  LOCK_CLASS(env, clazz, "SnappyCompressor");
+  const char* uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  UNLOCK_CLASS(env, clazz, "SnappyCompressor");
+
+  if (uncompressed_bytes == 0) {
+    return (jint)0;
+  }
+
+  // Get the output direct buffer
+  LOCK_CLASS(env, clazz, "SnappyCompressor");
+  char* compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  UNLOCK_CLASS(env, clazz, "SnappyCompressor");
+
+  if (compressed_bytes == 0) {
+    return (jint)0;
+  }
+
+  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len, compressed_bytes, &compressed_direct_buf_len);
+  if (ret != SNAPPY_OK){
+    THROW(env, "Ljava/lang/InternalError", "Could not compress data. Buffer length is too small.");
+  }
+
+  (*env)->SetIntField(env, thisj, SnappyCompressor_uncompressedDirectBufLen, 0);
+
+  return (jint)compressed_direct_buf_len;
+}
+
+#endif //define HADOOP_SNAPPY_LIBRARY

+ 131 - 0
common/src/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c

@@ -0,0 +1,131 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#if defined HAVE_CONFIG_H
+  #include <config.h>
+#endif
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
+#if defined HAVE_STDIO_H
+  #include <stdio.h>
+#else
+  #error 'stdio.h not found'
+#endif
+
+#if defined HAVE_STDLIB_H
+  #include <stdlib.h>
+#else
+  #error 'stdlib.h not found'
+#endif
+
+#if defined HAVE_STRING_H
+  #include <string.h>
+#else
+  #error 'string.h not found'
+#endif
+
+#if defined HAVE_DLFCN_H
+  #include <dlfcn.h>
+#else
+  #error 'dlfcn.h not found'
+#endif
+
+#include "org_apache_hadoop_io_compress_snappy.h"
+#include "org_apache_hadoop_io_compress_snappy_SnappyDecompressor.h"
+
+static jfieldID SnappyDecompressor_clazz;
+static jfieldID SnappyDecompressor_compressedDirectBuf;
+static jfieldID SnappyDecompressor_compressedDirectBufLen;
+static jfieldID SnappyDecompressor_uncompressedDirectBuf;
+static jfieldID SnappyDecompressor_directBufferSize;
+
+static snappy_status (*dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*);
+
+JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_initIDs
+(JNIEnv *env, jclass clazz){
+
+  // Load libsnappy.so
+  void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
+  if (!libsnappy) {
+    char* msg = (char*)malloc(1000);
+    snprintf(msg, 1000, "%s (%s)!", "Cannot load " HADOOP_SNAPPY_LIBRARY, dlerror());
+    THROW(env, "java/lang/UnsatisfiedLinkError", msg);
+    return;
+  }
+
+  // Locate the requisite symbols from libsnappy.so
+  dlerror();                                 // Clear any existing error
+  LOAD_DYNAMIC_SYMBOL(dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress");
+
+  SnappyDecompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
+                                                   "Ljava/lang/Class;");
+  SnappyDecompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz,
+                                                           "compressedDirectBuf",
+                                                           "Ljava/nio/Buffer;");
+  SnappyDecompressor_compressedDirectBufLen = (*env)->GetFieldID(env,clazz,
+                                                              "compressedDirectBufLen", "I");
+  SnappyDecompressor_uncompressedDirectBuf = (*env)->GetFieldID(env,clazz,
+                                                             "uncompressedDirectBuf",
+                                                             "Ljava/nio/Buffer;");
+  SnappyDecompressor_directBufferSize = (*env)->GetFieldID(env, clazz,
+                                                         "directBufferSize", "I");
+}
+
+JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_decompressBytesDirect
+(JNIEnv *env, jobject thisj){
+  // Get members of SnappyDecompressor
+  jobject clazz = (*env)->GetStaticObjectField(env,thisj, SnappyDecompressor_clazz);
+  jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_compressedDirectBuf);
+  jint compressed_direct_buf_len = (*env)->GetIntField(env,thisj, SnappyDecompressor_compressedDirectBufLen);
+  jobject uncompressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_uncompressedDirectBuf);
+  size_t uncompressed_direct_buf_len = (*env)->GetIntField(env, thisj, SnappyDecompressor_directBufferSize);
+
+  // Get the input direct buffer
+  LOCK_CLASS(env, clazz, "SnappyDecompressor");
+  const char* compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
+
+  if (compressed_bytes == 0) {
+    return (jint)0;
+  }
+
+  // Get the output direct buffer
+  LOCK_CLASS(env, clazz, "SnappyDecompressor");
+  char* uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
+
+  if (uncompressed_bytes == 0) {
+    return (jint)0;
+  }
+
+  snappy_status ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len, uncompressed_bytes, &uncompressed_direct_buf_len);
+  if (ret == SNAPPY_BUFFER_TOO_SMALL){
+    THROW(env, "Ljava/lang/InternalError", "Could not decompress data. Buffer length is too small.");
+  } else if (ret == SNAPPY_INVALID_INPUT){
+    THROW(env, "Ljava/lang/InternalError", "Could not decompress data. Input is invalid.");
+  } else if (ret != SNAPPY_OK){
+    THROW(env, "Ljava/lang/InternalError", "Could not decompress data.");
+  }
+
+  (*env)->SetIntField(env, thisj, SnappyDecompressor_compressedDirectBufLen, 0);
+
+  return (jint)uncompressed_direct_buf_len;
+}
+
+#endif //define HADOOP_SNAPPY_LIBRARY

+ 58 - 0
common/src/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h

@@ -0,0 +1,58 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+
+#if !defined ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
+#define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
+
+
+#if defined HAVE_CONFIG_H
+  #include <config.h>
+#endif
+
+#if defined HADOOP_SNAPPY_LIBRARY
+
+  #if defined HAVE_STDDEF_H
+    #include <stddef.h>
+  #else
+    #error 'stddef.h not found'
+  #endif
+
+  #if defined HAVE_SNAPPY_C_H
+    #include <snappy-c.h>
+  #else
+    #error 'Please install snappy-development packages for your platform.'
+  #endif
+
+  #if defined HAVE_DLFCN_H
+    #include <dlfcn.h>
+  #else
+    #error "dlfcn.h not found"
+  #endif
+
+  #if defined HAVE_JNI_H
+    #include <jni.h>
+  #else
+    #error 'jni.h not found'
+  #endif
+
+  #include "org_apache_hadoop.h"
+
+#endif //define HADOOP_SNAPPY_LIBRARY
+
+#endif //ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H

+ 1 - 1
common/src/saveVersion.sh

@@ -26,7 +26,7 @@ build_dir=$2
 user=`whoami | tr '\n\r' '\n'`
 date=`date`
 cwd=`pwd`
-if [ -d .git ]; then
+if git rev-parse HEAD 2>/dev/null > /dev/null ; then
   revision=`git log -1 --pretty=format:"%H"`
   hostname=`hostname`
   branch=`git branch | sed -n -e 's/^* //p'`

+ 72 - 0
common/src/test/bin/smart-apply-patch.sh

@@ -0,0 +1,72 @@
+#!/usr/bin/env bash
+#   Licensed under the Apache License, Version 2.0 (the "License");
+#   you may not use this file except in compliance with the License.
+#   You may obtain a copy of the License at
+#
+#       http://www.apache.org/licenses/LICENSE-2.0
+#
+#   Unless required by applicable law or agreed to in writing, software
+#   distributed under the License is distributed on an "AS IS" BASIS,
+#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+#   See the License for the specific language governing permissions and
+#   limitations under the License.
+
+set -e
+
+PATCH_FILE=$1
+if [ -z "$PATCH_FILE" ]; then
+  echo usage: $0 patch-file
+  exit 1
+fi
+
+PATCH=${PATCH:-patch} # allow overriding patch binary
+
+# Cleanup handler for temporary files
+TOCLEAN=""
+cleanup() {
+  rm $TOCLEAN
+  exit $1
+}
+trap "cleanup 1" HUP INT QUIT TERM
+
+# Allow passing "-" for stdin patches
+if [ "$PATCH_FILE" == "-" ]; then
+  PATCH_FILE=/tmp/tmp.in.$$
+  cat /dev/fd/0 > $PATCH_FILE
+  TOCLEAN="$TOCLEAN $PATCH_FILE"
+fi
+
+# Come up with a list of changed files into $TMP
+TMP=/tmp/tmp.paths.$$
+TOCLEAN="$TOCLEAN $TMP"
+grep '^+++\|^---' $PATCH_FILE | cut -c '5-' | grep -v /dev/null | sort | uniq > $TMP
+
+# Assume p0 to start
+PLEVEL=0
+
+# if all of the lines start with a/ or b/, then this is a git patch that
+# was generated without --no-prefix
+if ! grep -qv '^a/\|^b/' $TMP ; then
+  echo Looks like this is a git patch. Stripping a/ and b/ prefixes
+  echo and incrementing PLEVEL
+  PLEVEL=$[$PLEVEL + 1]
+  sed -i -e 's,^[ab]/,,' $TMP
+fi
+
+# if all of the lines start with common/, hdfs/, or mapreduce/, this is
+# relative to the hadoop root instead of the subproject root, so we need
+# to chop off another layer
+PREFIX_DIRS=$(cut -d '/' -f 1 $TMP | sort | uniq)
+if [[ "$PREFIX_DIRS" =~ ^(hdfs|common|mapreduce)$ ]]; then
+
+  echo Looks like this is relative to project root. Increasing PLEVEL
+  PLEVEL=$[$PLEVEL + 1]
+elif ! echo "$PREFIX_DIRS" | grep -vxq 'common\|hdfs\|mapreduce' ; then
+  echo Looks like this is a cross-subproject patch. Not supported!
+  exit 1
+fi
+
+echo Going to apply patch with: $PATCH -p$PLEVEL
+$PATCH -p$PLEVEL -E < $PATCH_FILE
+
+cleanup 0

+ 4 - 2
common/src/test/bin/test-patch.sh

@@ -18,7 +18,8 @@ ulimit -n 1024
 ### Setup some variables.  
 ### SVN_REVISION and BUILD_URL are set by Hudson if it is run by patch process
 ### Read variables from properties file
-. `dirname $0`/../test-patch.properties
+bindir=$(dirname $0)
+. $bindir/../test-patch.properties
 
 ###############################################################################
 parseArgs() {
@@ -270,7 +271,8 @@ applyPatch () {
   echo "======================================================================"
   echo "======================================================================"
   echo ""
   echo ""
   echo ""
   echo ""
-  $PATCH -E -p0 < $PATCH_DIR/patch
+  export PATCH
+  $bindir/smart-apply-patch.sh $PATCH_DIR/patch
   if [[ $? != 0 ]] ; then
     echo "PATCH APPLICATION FAILED"
     JIRA_COMMENT="$JIRA_COMMENT

+ 13 - 2
common/src/test/core/org/apache/hadoop/conf/TestConfiguration.java

@@ -33,6 +33,7 @@ import java.util.regex.Pattern;
 import junit.framework.TestCase;
 import static org.junit.Assert.assertArrayEquals;
 
+import org.apache.commons.lang.StringUtils;
 import org.apache.hadoop.fs.Path;
 import org.codehaus.jackson.map.ObjectMapper; 
 
@@ -246,7 +247,12 @@ public class TestConfiguration extends TestCase {
 
   public void testGetLocalPath() throws IOException {
     Configuration conf = new Configuration();
-    conf.set("dirs", "a, b, c ");
+    String[] dirs = new String[]{"a", "b", "c"};
+    for (int i = 0; i < dirs.length; i++) {
+      dirs[i] = new Path(System.getProperty("test.build.data"), dirs[i])
+          .toString();
+    }
+    conf.set("dirs", StringUtils.join(dirs, ","));
     for (int i = 0; i < 1000; i++) {
       String localPath = conf.getLocalPath("dirs", "dir" + i).toString();
       assertTrue("Path doesn't end in specified dir: " + localPath,
@@ -258,7 +264,12 @@ public class TestConfiguration extends TestCase {
   
   public void testGetFile() throws IOException {
     Configuration conf = new Configuration();
-    conf.set("dirs", "a, b, c ");
+    String[] dirs = new String[]{"a", "b", "c"};
+    for (int i = 0; i < dirs.length; i++) {
+      dirs[i] = new Path(System.getProperty("test.build.data"), dirs[i])
+          .toString();
+    }
+    conf.set("dirs", StringUtils.join(dirs, ","));
     for (int i = 0; i < 1000; i++) {
       String localPath = conf.getFile("dirs", "dir" + i).toString();
       assertTrue("Path doesn't end in specified dir: " + localPath,

+ 29 - 4
common/src/test/core/org/apache/hadoop/fs/FSMainOperationsBaseTest.java

@@ -25,6 +25,7 @@ import java.io.InputStream;
 
 
 import org.apache.hadoop.fs.Options.Rename;
+import org.apache.hadoop.fs.permission.FsPermission;
 import org.junit.After;
 import org.junit.Assert;
 import org.junit.Before;
@@ -252,8 +253,9 @@ public abstract class FSMainOperationsBaseTest  {
     }
   } 
   
+  @Test
   public void testListStatusThrowsExceptionForNonExistentFile()
-                                                    throws Exception {
+  throws Exception {
     try {
       fSys.listStatus(getTestRootPath(fSys, "test/hadoop/file"));
       Assert.fail("Should throw FileNotFoundException");
@@ -262,6 +264,27 @@ public abstract class FSMainOperationsBaseTest  {
     }
   }
   
+  // TODO: update after fixing HADOOP-7352
+  @Test
+  public void testListStatusThrowsExceptionForUnreadableDir()
+  throws Exception {
+    Path testRootDir = getTestRootPath(fSys, "test/hadoop/dir");
+    Path obscuredDir = new Path(testRootDir, "foo");
+    Path subDir = new Path(obscuredDir, "bar"); //so foo is non-empty
+    fSys.mkdirs(subDir);
+    fSys.setPermission(obscuredDir, new FsPermission((short)0)); //no access
+    try {
+      fSys.listStatus(obscuredDir);
+      Assert.fail("Should throw IOException");
+    } catch (IOException ioe) {
+      // expected
+    } finally {
+      // make sure the test directory can be deleted
+      fSys.setPermission(obscuredDir, new FsPermission((short)0755)); //default
+    }
+  }
+
+
   @Test
   public void testListStatus() throws Exception {
     Path[] testDirs = {
@@ -315,6 +338,7 @@ public abstract class FSMainOperationsBaseTest  {
     
   }
   
+  @Test
   public void testListStatusFilterWithSomeMatches() throws Exception {
     Path[] testDirs = {
         getTestRootPath(fSys, TEST_DIR_AAA),
@@ -919,12 +943,13 @@ public abstract class FSMainOperationsBaseTest  {
 
   @Test
   public void testRenameDirectoryAsNonExistentDirectory() throws Exception {
-    testRenameDirectoryAsNonExistentDirectory(Rename.NONE);
+    doTestRenameDirectoryAsNonExistentDirectory(Rename.NONE);
     tearDown();
-    testRenameDirectoryAsNonExistentDirectory(Rename.OVERWRITE);
+    doTestRenameDirectoryAsNonExistentDirectory(Rename.OVERWRITE);
   }
 
-  private void testRenameDirectoryAsNonExistentDirectory(Rename... options) throws Exception {
+  private void doTestRenameDirectoryAsNonExistentDirectory(Rename... options) 
+  throws Exception {
     if (!renameSupported()) return;
     
     Path src = getTestRootPath(fSys, "test/hadoop/dir");

+ 28 - 3
common/src/test/core/org/apache/hadoop/http/TestHtmlQuoting.java

@@ -17,11 +17,12 @@
  */
 package org.apache.hadoop.http;
 
-import static org.junit.Assert.assertEquals;
-import static org.junit.Assert.assertFalse;
-import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.*;
+
+import javax.servlet.http.HttpServletRequest;
 
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestHtmlQuoting {
 
@@ -62,4 +63,28 @@ public class TestHtmlQuoting {
     }
     runRoundTrip(buffer.toString());
   }
+  
+
+  @Test
+  public void testRequestQuoting() throws Exception {
+    HttpServletRequest mockReq = Mockito.mock(HttpServletRequest.class);
+    HttpServer.QuotingInputFilter.RequestQuoter quoter =
+      new HttpServer.QuotingInputFilter.RequestQuoter(mockReq);
+    
+    Mockito.doReturn("a<b").when(mockReq).getParameter("x");
+    assertEquals("Test simple param quoting",
+        "a&lt;b", quoter.getParameter("x"));
+    
+    Mockito.doReturn(null).when(mockReq).getParameter("x");
+    assertEquals("Test that missing parameters dont cause NPE",
+        null, quoter.getParameter("x"));
+
+    Mockito.doReturn(new String[]{"a<b", "b"}).when(mockReq).getParameterValues("x");
+    assertArrayEquals("Test escaping of an array",
+        new String[]{"a&lt;b", "b"}, quoter.getParameterValues("x"));
+
+    Mockito.doReturn(null).when(mockReq).getParameterValues("x");
+    assertArrayEquals("Test that missing parameters dont cause NPE for array",
+        null, quoter.getParameterValues("x"));
+  }
 }

+ 26 - 0
common/src/test/core/org/apache/hadoop/http/TestHttpServer.java

@@ -45,16 +45,20 @@ import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletRequestWrapper;
 import javax.servlet.http.HttpServletResponse;
 
+import junit.framework.Assert;
+
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
+import org.apache.hadoop.http.HttpServer.QuotingInputFilter.RequestQuoter;
 import org.apache.hadoop.security.Groups;
 import org.apache.hadoop.security.ShellBasedUnixGroupsMapping;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.junit.AfterClass;
 import org.junit.BeforeClass;
 import org.junit.Test;
+import org.mockito.Mockito;
 
 public class TestHttpServer extends HttpServerFunctionalTest {
   private static HttpServer server;
@@ -379,4 +383,26 @@ public class TestHttpServer extends HttpServerFunctionalTest {
     }
     myServer.stop();
   }
+  
+  @Test
+  public void testRequestQuoterWithNull() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    Mockito.doReturn(null).when(request).getParameterValues("dummy");
+    RequestQuoter requestQuoter = new RequestQuoter(request);
+    String[] parameterValues = requestQuoter.getParameterValues("dummy");
+    Assert.assertEquals("It should return null "
+        + "when there are no values for the parameter", null, parameterValues);
+  }
+
+  @Test
+  public void testRequestQuoterWithNotNull() throws Exception {
+    HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
+    String[] values = new String[] { "abc", "def" };
+    Mockito.doReturn(values).when(request).getParameterValues("dummy");
+    RequestQuoter requestQuoter = new RequestQuoter(request);
+    String[] parameterValues = requestQuoter.getParameterValues("dummy");
+    Assert.assertTrue("It should return Parameter Values", Arrays.equals(
+        values, parameterValues));
+  }
+
 }

+ 42 - 2
common/src/test/core/org/apache/hadoop/io/TestBytesWritable.java

@@ -17,13 +17,17 @@
  */
 package org.apache.hadoop.io;
 
-import junit.framework.TestCase;
+import org.junit.Test;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
 
 /**
  * This is the unit test for BytesWritable.
  */
-public class TestBytesWritable extends TestCase {
+public class TestBytesWritable {
 
+  @Test
   public void testSizeChange() throws Exception {
     byte[] hadoop = "hadoop".getBytes();
     BytesWritable buf = new BytesWritable(hadoop);
@@ -50,6 +54,7 @@ public class TestBytesWritable extends TestCase {
     assertEquals(hadoop[0], buf.getBytes()[0]);
   }
   
+  @Test
   public void testHash() throws Exception {
     byte[] owen = "owen".getBytes();
     BytesWritable buf = new BytesWritable(owen);
@@ -60,6 +65,7 @@ public class TestBytesWritable extends TestCase {
     assertEquals(1, buf.hashCode());
   }
   
+  @Test
   public void testCompare() throws Exception {
     byte[][] values = new byte[][]{"abc".getBytes(), 
                                    "ad".getBytes(),
@@ -88,10 +94,44 @@ public class TestBytesWritable extends TestCase {
     assertEquals(expected, actual);
   }
 
+  @Test
   public void testToString() {
     checkToString(new byte[]{0,1,2,0x10}, "00 01 02 10");
     checkToString(new byte[]{-0x80, -0x7f, -0x1, -0x2, 1, 0}, 
                   "80 81 ff fe 01 00");
   }
+  /**
+   * This test was written as result of adding the new zero
+   * copy constructor and set method to BytesWritable. These
+   * methods allow users to specify the backing buffer of the
+   * BytesWritable instance and a length. 
+   */
+  @Test
+  public void testZeroCopy() {
+    byte[] bytes = "brock".getBytes();
+    BytesWritable zeroBuf = new BytesWritable(bytes, bytes.length); // new
+    BytesWritable copyBuf = new BytesWritable(bytes); // old
+    // using zero copy constructor shouldn't result in a copy
+    assertTrue("copy took place, backing array != array passed to constructor",
+      bytes == zeroBuf.getBytes());
+    assertTrue("length of BW should backing byte array", zeroBuf.getLength() == bytes.length);
+    assertEquals("objects with same backing array should be equal", zeroBuf, copyBuf);
+    assertEquals("string repr of objects with same backing array should be equal", 
+        zeroBuf.toString(), copyBuf.toString());
+    assertTrue("compare order objects with same backing array should be equal", 
+        zeroBuf.compareTo(copyBuf) == 0);
+    assertTrue("hash of objects with same backing array should be equal",
+        zeroBuf.hashCode() == copyBuf.hashCode());
+    
+    // ensure expanding buffer is handled correctly
+    // for buffers created with zero copy api
+    byte[] buffer = new byte[bytes.length * 5];
+    zeroBuf.set(buffer, 0, buffer.length); // expand internal buffer
+    zeroBuf.set(bytes, 0, bytes.length); // set back to normal contents
+    assertEquals("buffer created with (array, len) has bad contents", 
+        zeroBuf, copyBuf);
+    assertTrue("buffer created with (array, len) has bad length",
+        zeroBuf.getLength() == copyBuf.getLength());
+  }
 }
 

+ 45 - 0
common/src/test/core/org/apache/hadoop/io/TestIOUtils.java

@@ -18,6 +18,9 @@
 
 package org.apache.hadoop.io;
 
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.fail;
+
 import java.io.IOException;
 import java.io.InputStream;
 import java.io.OutputStream;
@@ -65,4 +68,46 @@ public class TestIOUtils {
     Mockito.verify(inputStream, Mockito.atMost(0)).close();
     Mockito.verify(outputStream, Mockito.atMost(0)).close();
   }
+  
+  @Test
+  public void testCopyBytesWithCountShouldCloseStreamsWhenCloseIsTrue()
+      throws Exception {
+    InputStream inputStream = Mockito.mock(InputStream.class);
+    OutputStream outputStream = Mockito.mock(OutputStream.class);
+    Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
+    IOUtils.copyBytes(inputStream, outputStream, (long) 1, true);
+    Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
+    Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
+  }
+
+  @Test
+  public void testCopyBytesWithCountShouldNotCloseStreamsWhenCloseIsFalse()
+      throws Exception {
+    InputStream inputStream = Mockito.mock(InputStream.class);
+    OutputStream outputStream = Mockito.mock(OutputStream.class);
+    Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
+    IOUtils.copyBytes(inputStream, outputStream, (long) 1, false);
+    Mockito.verify(inputStream, Mockito.atMost(0)).close();
+    Mockito.verify(outputStream, Mockito.atMost(0)).close();
+  }
+
+  @Test
+  public void testCopyBytesWithCountShouldThrowOutTheStreamClosureExceptions()
+      throws Exception {
+    InputStream inputStream = Mockito.mock(InputStream.class);
+    OutputStream outputStream = Mockito.mock(OutputStream.class);
+    Mockito.doReturn(-1).when(inputStream).read(new byte[4096], 0, 1);
+    Mockito.doThrow(new IOException("Exception in closing the stream")).when(
+        outputStream).close();
+    try {
+      IOUtils.copyBytes(inputStream, outputStream, (long) 1, true);
+      fail("Should throw out the exception");
+    } catch (IOException e) {
+      assertEquals("Not throwing the expected exception.",
+          "Exception in closing the stream", e.getMessage());
+    }
+    Mockito.verify(inputStream, Mockito.atLeastOnce()).close();
+    Mockito.verify(outputStream, Mockito.atLeastOnce()).close();
+  }
+  
 }

+ 81 - 0
common/src/test/core/org/apache/hadoop/io/TestObjectWritableProtos.java

@@ -0,0 +1,81 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.io;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.IOException;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.Test;
+
+import com.google.protobuf.DescriptorProtos;
+import com.google.protobuf.Message;
+
+/**
+ * Test case for the use of Protocol Buffers within ObjectWritable.
+ */
+public class TestObjectWritableProtos {
+
+  @Test
+  public void testProtoBufs() throws IOException {
+    doTest(1);
+  }
+
+  @Test
+  public void testProtoBufs2() throws IOException {
+    doTest(2);
+  }
+  
+  @Test
+  public void testProtoBufs3() throws IOException {
+    doTest(3);
+  }
+  
+  /**
+   * Write a protobuf to a buffer 'numProtos' times, and then
+   * read them back, making sure all data comes through correctly.
+   */
+  private void doTest(int numProtos) throws IOException {
+    Configuration conf = new Configuration();
+    DataOutputBuffer out = new DataOutputBuffer();
+
+    // Write numProtos protobufs to the buffer
+    Message[] sent = new Message[numProtos];
+    for (int i = 0; i < numProtos; i++) {
+      // Construct a test protocol buffer using one of the
+      // protos that ships with the protobuf library
+      Message testProto = DescriptorProtos.EnumValueDescriptorProto.newBuilder()
+        .setName("test" + i).setNumber(i).build();
+      ObjectWritable.writeObject(out, testProto,
+          DescriptorProtos.EnumValueDescriptorProto.class, conf);
+      sent[i] = testProto;
+    }
+
+    // Read back the data
+    DataInputBuffer in = new DataInputBuffer();
+    in.reset(out.getData(), out.getLength());
+    
+    for (int i = 0; i < numProtos; i++) {
+      Message received = (Message)ObjectWritable.readObject(in, conf);
+      
+      assertEquals(sent[i], received);
+    }
+  }
+
+}

+ 15 - 3
common/src/test/core/org/apache/hadoop/io/compress/TestCodec.java

@@ -40,7 +40,6 @@ import java.util.zip.GZIPOutputStream;
 
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.fs.FSDataOutputStream;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.FileSystem;
 import org.apache.hadoop.fs.Path;
@@ -52,8 +51,7 @@ import org.apache.hadoop.io.SequenceFile;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.SequenceFile.CompressionType;
-import org.apache.hadoop.io.compress.CompressionOutputStream;
-import org.apache.hadoop.io.compress.CompressorStream;
+import org.apache.hadoop.io.compress.snappy.LoadSnappy;
 import org.apache.hadoop.io.compress.zlib.BuiltInGzipDecompressor;
 import org.apache.hadoop.io.compress.zlib.BuiltInZlibDeflater;
 import org.apache.hadoop.io.compress.zlib.BuiltInZlibInflater;
@@ -68,6 +66,7 @@ import org.apache.commons.codec.binary.Base64;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 
+import org.junit.Assert;
 import org.junit.Test;
 import static org.junit.Assert.*;
 
@@ -96,6 +95,19 @@ public class TestCodec {
     codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.BZip2Codec");
     codecTest(conf, seed, count, "org.apache.hadoop.io.compress.BZip2Codec");
   }
+  
+  @Test
+  public void testSnappyCodec() throws IOException {
+    if (LoadSnappy.isAvailable()) {
+      if (LoadSnappy.isLoaded()) {
+        codecTest(conf, seed, 0, "org.apache.hadoop.io.compress.SnappyCodec");
+        codecTest(conf, seed, count, "org.apache.hadoop.io.compress.SnappyCodec");
+      }
+      else {
+        Assert.fail("Snappy native available but Hadoop native not");
+      }
+    }
+  }
 
   @Test
   public void testDeflateCodec() throws IOException {

+ 96 - 15
common/src/test/core/org/apache/hadoop/ipc/TestAvroRpc.java

@@ -18,8 +18,14 @@
 
 package org.apache.hadoop.ipc;
 
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+
+import java.io.IOException;
 import java.net.InetSocketAddress;
 
+import javax.security.sasl.Sasl;
+
+import junit.framework.Assert;
 import junit.framework.TestCase;
 
 import org.apache.avro.ipc.AvroRemoteException;
@@ -27,7 +33,16 @@ import org.apache.avro.util.Utf8;
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.ipc.TestSaslRPC.CustomSecurityInfo;
+import org.apache.hadoop.ipc.TestSaslRPC.TestTokenIdentifier;
+import org.apache.hadoop.ipc.TestSaslRPC.TestTokenSecretManager;
 import org.apache.hadoop.net.NetUtils;
+import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityInfo;
+import org.apache.hadoop.security.SecurityUtil;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.Token;
 
 /** Unit tests for AvroRpc. */
 public class TestAvroRpc extends TestCase {
@@ -36,8 +51,6 @@ public class TestAvroRpc extends TestCase {
   public static final Log LOG =
     LogFactory.getLog(TestAvroRpc.class);
   
-  private static Configuration conf = new Configuration();
-
   int datasize = 1024*100;
   int numThreads = 50;
 
@@ -56,19 +69,47 @@ public class TestAvroRpc extends TestCase {
     }
   }
 
-  public void testCalls() throws Exception {
+  public void testReflect() throws Exception {
+    testReflect(false);
+  }
+
+  public void testSecureReflect() throws Exception {
+    testReflect(true);
+  }
+
+  public void testSpecific() throws Exception {
+    testSpecific(false);
+  }
+
+  public void testSecureSpecific() throws Exception {
+    testSpecific(true);
+  }
+
+  private void testReflect(boolean secure) throws Exception {
     Configuration conf = new Configuration();
+    TestTokenSecretManager sm = null;
+    if (secure) {
+      makeSecure(conf);
+      sm = new TestTokenSecretManager();
+    }
+    UserGroupInformation.setConfiguration(conf);
     RPC.setProtocolEngine(conf, AvroTestProtocol.class, AvroRpcEngine.class);
     Server server = RPC.getServer(AvroTestProtocol.class,
-                                  new TestImpl(), ADDRESS, 0, conf);
-    AvroTestProtocol proxy = null;
+                                  new TestImpl(), ADDRESS, 0, 5, true, 
+                                  conf, sm);
     try {
       server.start();
-
       InetSocketAddress addr = NetUtils.getConnectAddress(server);
-      proxy =
+
+      if (secure) {
+        addToken(sm, addr);
+        //QOP must be auth
+        Assert.assertEquals("auth", SaslRpcServer.SASL_PROPS.get(Sasl.QOP));
+      }
+
+      AvroTestProtocol proxy =
         (AvroTestProtocol)RPC.getProxy(AvroTestProtocol.class, 0, addr, conf);
-      
+
       proxy.ping();
 
       String echo = proxy.echo("hello world");
@@ -89,23 +130,62 @@ public class TestAvroRpc extends TestCase {
       assertTrue(caught);
 
     } finally {
+      resetSecurity();
       server.stop();
     }
   }
 
-  public void testAvroSpecificRpc() throws Exception {
+  private void makeSecure(Configuration conf) {
+    conf.set(HADOOP_SECURITY_AUTHENTICATION, "kerberos");
+    conf.set("hadoop.rpc.socket.factory.class.default", "");
+    //Avro doesn't work with security annotations on protocol.
+    //Avro works ONLY with custom security context
+    SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
+  }
+  
+  private void resetSecurity() {
+    SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
+  }
+
+  private void addToken(TestTokenSecretManager sm, 
+      InetSocketAddress addr) throws IOException {
+    final UserGroupInformation current = UserGroupInformation.getCurrentUser();
+    
+    TestTokenIdentifier tokenId = new TestTokenIdentifier(new Text(current
+        .getUserName()));
+    Token<TestTokenIdentifier> token = new Token<TestTokenIdentifier>(tokenId,
+        sm);
+    Text host = new Text(addr.getAddress().getHostAddress() + ":"
+        + addr.getPort());
+    token.setService(host);
+    LOG.info("Service IP address for token is " + host);
+    current.addToken(token);
+  }
+
+  private void testSpecific(boolean secure) throws Exception {
     Configuration conf = new Configuration();
+    TestTokenSecretManager sm = null;
+    if (secure) {
+      makeSecure(conf);
+      sm = new TestTokenSecretManager();
+    }
+    UserGroupInformation.setConfiguration(conf);
     RPC.setProtocolEngine(conf, AvroSpecificTestProtocol.class, 
         AvroSpecificRpcEngine.class);
     Server server = RPC.getServer(AvroSpecificTestProtocol.class,
-                                  new AvroSpecificTestProtocolImpl(), 
-                                  ADDRESS, 0, conf);
-    AvroSpecificTestProtocol proxy = null;
+        new AvroSpecificTestProtocolImpl(), ADDRESS, 0, 5, true, 
+        conf, sm);
     try {
       server.start();
-
       InetSocketAddress addr = NetUtils.getConnectAddress(server);
-      proxy =
+
+      if (secure) {
+        addToken(sm, addr);
+        //QOP must be auth
+        Assert.assertEquals("auth", SaslRpcServer.SASL_PROPS.get(Sasl.QOP));
+      }
+
+      AvroSpecificTestProtocol proxy =
         (AvroSpecificTestProtocol)RPC.getProxy(AvroSpecificTestProtocol.class, 
             0, addr, conf);
       
@@ -116,6 +196,7 @@ public class TestAvroRpc extends TestCase {
       assertEquals(3, intResult);
 
     } finally {
+      resetSecurity();
       server.stop();
     }
   }
@@ -134,5 +215,5 @@ public class TestAvroRpc extends TestCase {
     }
     
   }
-  
+
 }

+ 51 - 0
common/src/test/core/org/apache/hadoop/ipc/TestIPC.java

@@ -23,6 +23,7 @@ import org.apache.commons.logging.*;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.LongWritable;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.util.StringUtils;
 import org.apache.hadoop.net.NetUtils;
 
@@ -45,6 +46,9 @@ import static org.mockito.Mockito.*;
 
 import org.apache.hadoop.conf.Configuration;
 import org.junit.Assume;
+import org.mockito.Mockito;
+import org.mockito.invocation.InvocationOnMock;
+import org.mockito.stubbing.Answer;
 
 import com.google.common.primitives.Bytes;
 import com.google.common.primitives.Ints;
@@ -469,6 +473,53 @@ public class TestIPC {
     }
   }
 
+  /**
+   * Test that, if a RuntimeException is thrown after creating a socket
+   * but before successfully connecting to the IPC server, that the
+   * failure is handled properly. This is a regression test for
+   * HADOOP-7428.
+   */
+  @Test
+  public void testRTEDuringConnectionSetup() throws Exception {
+    // Set up a socket factory which returns sockets which
+    // throw an RTE when setSoTimeout is called.
+    SocketFactory spyFactory = spy(NetUtils.getDefaultSocketFactory(conf));
+    Mockito.doAnswer(new Answer<Socket>() {
+      @Override
+      public Socket answer(InvocationOnMock invocation) throws Throwable {
+        Socket s = spy((Socket)invocation.callRealMethod());
+        doThrow(new RuntimeException("Injected fault")).when(s)
+          .setSoTimeout(anyInt());
+        return s;
+      }
+    }).when(spyFactory).createSocket();
+      
+    Server server = new TestServer(1, true);
+    server.start();
+    try {
+      // Call should fail due to injected exception.
+      InetSocketAddress address = NetUtils.getConnectAddress(server);
+      Client client = new Client(LongWritable.class, conf, spyFactory);
+      try {
+        client.call(new LongWritable(RANDOM.nextLong()),
+                address, null, null, 0, conf);
+        fail("Expected an exception to have been thrown");
+      } catch (Exception e) {
+        LOG.info("caught expected exception", e);
+        assertTrue(StringUtils.stringifyException(e).contains(
+            "Injected fault"));
+      }
+      // Resetting to the normal socket behavior should succeed
+      // (i.e. it should not have cached a half-constructed connection)
+  
+      Mockito.reset(spyFactory);
+      client.call(new LongWritable(RANDOM.nextLong()),
+          address, null, null, 0, conf);
+    } finally {
+      server.stop();
+    }
+  }
+  
   @Test
   public void testIpcTimeout() throws Exception {
     // start server

+ 19 - 0
common/src/test/core/org/apache/hadoop/ipc/TestRPC.java

@@ -40,6 +40,10 @@ import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
 import org.apache.hadoop.security.authorize.Service;
 import org.apache.hadoop.security.AccessControlException;
+
+import com.google.protobuf.DescriptorProtos;
+import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
+
 import static org.apache.hadoop.test.MetricsAsserts.*;
 
 import static org.mockito.Mockito.*;
@@ -71,6 +75,9 @@ public class TestRPC extends TestCase {
     int error() throws IOException;
     void testServerGet() throws IOException;
     int[] exchange(int[] values) throws IOException;
+    
+    DescriptorProtos.EnumDescriptorProto exchangeProto(
+        DescriptorProtos.EnumDescriptorProto arg);
   }
 
   public static class TestImpl implements TestProtocol {
@@ -136,6 +143,11 @@ public class TestRPC extends TestCase {
       }
       return values;
     }
+
+    @Override
+    public EnumDescriptorProto exchangeProto(EnumDescriptorProto arg) {
+      return arg;
+    }
   }
 
   //
 @@ -314,6 +326,13 @@ public class TestRPC extends TestCase {
 
     intResult = proxy.add(new int[] {1, 2});
     assertEquals(intResult, 3);
+    
+    // Test protobufs
+    EnumDescriptorProto sendProto =
+      EnumDescriptorProto.newBuilder().setName("test").build();
+    EnumDescriptorProto retProto = proxy.exchangeProto(sendProto);
+    assertEquals(sendProto, retProto);
+    assertNotSame(sendProto, retProto);
 
     boolean caught = false;
     try {
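
A side note on the protobuf round-trip asserted above (illustrative only, not part of the commit): generated messages such as EnumDescriptorProto are immutable and compare field-by-field, so an RPC layer that re-serializes them can hand back an instance that is equal but not identical. A minimal stand-alone sketch using only the protobuf-java calls already imported by the test:

    // Sketch: value equality with distinct identity after a serialize/parse round trip.
    static void protoRoundTrip() throws com.google.protobuf.InvalidProtocolBufferException {
      EnumDescriptorProto sent =
          EnumDescriptorProto.newBuilder().setName("test").build();
      EnumDescriptorProto received =
          EnumDescriptorProto.parseFrom(sent.toByteArray());
      assert sent.equals(received); // equal field-by-field
      assert sent != received;      // but a freshly parsed, separate object
    }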

+ 67 - 14
common/src/test/core/org/apache/hadoop/ipc/TestSaslRPC.java

 @@ -18,12 +18,15 @@
 
 package org.apache.hadoop.ipc;
 
-import static org.apache.hadoop.fs.CommonConfigurationKeys.HADOOP_SECURITY_AUTHENTICATION;
-import static org.junit.Assert.*;
+import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.HADOOP_SECURITY_AUTHENTICATION;
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertFalse;
+import static org.junit.Assert.assertTrue;
 
 import java.io.DataInput;
 import java.io.DataOutput;
 import java.io.IOException;
+import java.lang.annotation.Annotation;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedExceptionAction;
 import java.util.Collection;
 @@ -33,28 +36,28 @@ import javax.security.sasl.Sasl;
 
 import junit.framework.Assert;
 
-import org.apache.commons.logging.*;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
 import org.apache.commons.logging.impl.Log4JLogger;
-
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.KerberosInfo;
-import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.Token;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.security.token.TokenSelector;
-import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.hadoop.security.SaslInputStream;
 import org.apache.hadoop.security.SaslRpcClient;
 import org.apache.hadoop.security.SaslRpcServer;
+import org.apache.hadoop.security.SecurityInfo;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.TestUserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
-
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.Token;
+import org.apache.hadoop.security.token.TokenIdentifier;
+import org.apache.hadoop.security.token.TokenInfo;
+import org.apache.hadoop.security.token.TokenSelector;
+import org.apache.hadoop.security.token.SecretManager.InvalidToken;
 import org.apache.log4j.Level;
 import org.junit.Test;
 
@@ -187,6 +190,42 @@ public class TestSaslRPC {
     }
   }
 
+  public static class CustomSecurityInfo extends SecurityInfo {
+
+    @Override
+    public KerberosInfo getKerberosInfo(Class<?> protocol) {
+      return new KerberosInfo() {
+        @Override
+        public Class<? extends Annotation> annotationType() {
+          return null;
+        }
+        @Override
+        public String serverPrincipal() {
+          return SERVER_PRINCIPAL_KEY;
+        }
+        @Override
+        public String clientPrincipal() {
+          return null;
+        }
+      };
+    }
+
+    @Override
+    public TokenInfo getTokenInfo(Class<?> protocol) {
+      return new TokenInfo() {
+        @Override
+        public Class<? extends TokenSelector<? extends 
+            TokenIdentifier>> value() {
+          return TestTokenSelector.class;
+        }
+        @Override
+        public Class<? extends Annotation> annotationType() {
+          return null;
+        }
+      };
+    }
+  }
+
   @Test
   public void testDigestRpc() throws Exception {
     TestTokenSecretManager sm = new TestTokenSecretManager();
 @@ -195,7 +234,21 @@
 
     doDigestRpc(server, sm);
   }
-  
+
+  @Test
+  public void testDigestRpcWithoutAnnotation() throws Exception {
+    TestTokenSecretManager sm = new TestTokenSecretManager();
+    try {
+      SecurityUtil.setSecurityInfoProviders(new CustomSecurityInfo());
+      final Server server = RPC.getServer(TestSaslProtocol.class,
+                                          new TestSaslImpl(), ADDRESS, 0, 5, 
+                                          true, conf, sm);
+      doDigestRpc(server, sm);
+    } finally {
+      SecurityUtil.setSecurityInfoProviders(new SecurityInfo[0]);
+    }
+  }
+
   @Test
   public void testSecureToInsecureRpc() throws Exception {
     Server server = RPC.getServer(TestSaslProtocol.class,
@@ -223,8 +276,8 @@ public class TestSaslRPC {
     assertTrue(succeeded);
   }
   
-  private void doDigestRpc(Server server, TestTokenSecretManager sm)
-      throws Exception {
+  private void doDigestRpc(Server server, TestTokenSecretManager sm
+                           ) throws Exception {
     server.start();
 
     final UserGroupInformation current = UserGroupInformation.getCurrentUser();

+ 13 - 0
common/src/test/core/org/apache/hadoop/jmx/TestJMXJsonServlet.java

@@ -65,5 +65,18 @@ public class TestJMXJsonServlet extends HttpServerFunctionalTest {
     result = readOutput(new URL(baseUrl, "/jmx"));
     LOG.info("/jmx RESULT: "+result);
     assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
+    
+    // test to get an attribute of a mbean
+    result = readOutput(new URL(baseUrl, 
+        "/jmx?get=java.lang:type=Memory::HeapMemoryUsage"));
+    LOG.info("/jmx RESULT: "+result);
+    assertReFind("\"name\"\\s*:\\s*\"java.lang:type=Memory\"", result);
+    assertReFind("\"committed\"\\s*:", result);
+    
+    // negative test to get an attribute of a mbean
+    result = readOutput(new URL(baseUrl, 
+        "/jmx?get=java.lang:type=Memory::"));
+    LOG.info("/jmx RESULT: "+result);
+    assertReFind("\"ERROR\"", result);
   }
 }
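
The assertions above exercise the servlet's new get parameter, whose form is get=<MBean name>::<attribute>; leaving the attribute empty after the "::" yields the ERROR response checked in the negative test. A minimal client-side sketch (plain JDK I/O; baseUrl and a running HttpServer with the servlet mounted at /jmx are assumed, as in the test):

    // Sketch: fetch one MBean attribute as JSON from the JMX JSON servlet.
    URL url = new URL(baseUrl, "/jmx?get=java.lang:type=Memory::HeapMemoryUsage");
    BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()));
    try {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // JSON containing "name" and the requested attribute
      }
    } finally {
      in.close();
    }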

+ 1 - 1
common/src/test/core/org/apache/hadoop/metrics2/impl/TestMetricsConfig.java

@@ -138,6 +138,6 @@ public class TestMetricsConfig {
    * @return the filename
    */
   public static String getTestFilename(String basename) {
-    return "build/classes/"+ basename +".properties";
+    return "build/test/"+ basename +".properties";
   }
 }

+ 23 - 23
common/src/test/core/org/apache/hadoop/security/TestUserGroupInformation.java

 @@ -62,6 +62,29 @@ public class TestUserGroupInformation {
         + "DEFAULT");
     UserGroupInformation.setConfiguration(conf);
   }
+  
+  /** Test login method */
+  @Test
+  public void testLogin() throws Exception {
+    // login from unix
+    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
+    assertEquals(UserGroupInformation.getCurrentUser(),
+                 UserGroupInformation.getLoginUser());
+    assertTrue(ugi.getGroupNames().length >= 1);
+
+    // ensure that doAs works correctly
+    UserGroupInformation userGroupInfo = 
+      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
+    UserGroupInformation curUGI = 
+      userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
+        public UserGroupInformation run() throws IOException {
+          return UserGroupInformation.getCurrentUser();
+        }});
+    // make sure in the scope of the doAs, the right user is current
+    assertEquals(curUGI, userGroupInfo);
+    // make sure it is not the same as the login user
+    assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
+  }
 
   /**
    * given user name - get all the groups.
@@ -107,29 +130,6 @@ public class TestUserGroupInformation {
       }});
   }
 
-  /** Test login method */
-  @Test
-  public void testLogin() throws Exception {
-    // login from unix
-    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
-    assertEquals(UserGroupInformation.getCurrentUser(),
-                 UserGroupInformation.getLoginUser());
-    assertTrue(ugi.getGroupNames().length >= 1);
-
-    // ensure that doAs works correctly
-    UserGroupInformation userGroupInfo = 
-      UserGroupInformation.createUserForTesting(USER_NAME, GROUP_NAMES);
-    UserGroupInformation curUGI = 
-      userGroupInfo.doAs(new PrivilegedExceptionAction<UserGroupInformation>(){
-        public UserGroupInformation run() throws IOException {
-          return UserGroupInformation.getCurrentUser();
-        }});
-    // make sure in the scope of the doAs, the right user is current
-    assertEquals(curUGI, userGroupInfo);
-    // make sure it is not the same as the login user
-    assertFalse(curUGI.equals(UserGroupInformation.getLoginUser()));
-  }
-
   /** test constructor */
   @Test
   public void testConstructor() throws Exception {

+ 1 - 1
common/src/test/core/org/apache/hadoop/security/token/delegation/TestDelegationToken.java

@@ -164,7 +164,7 @@ public class TestDelegationToken {
       action.run();
       Assert.fail("action did not throw " + except);
     } catch (Throwable th) {
-      LOG.info("Caught an exception: " + StringUtils.stringifyException(th));
+      LOG.info("Caught an exception: ", th);
       assertEquals("action threw wrong exception", except, th.getClass());
     }
   }

+ 72 - 0
common/src/test/core/org/apache/hadoop/util/TestProtoUtil.java

@@ -0,0 +1,72 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.util;
+
+import static org.junit.Assert.assertEquals;
+
+import java.io.ByteArrayInputStream;
+import java.io.ByteArrayOutputStream;
+import java.io.DataInputStream;
+import java.io.IOException;
+
+import org.junit.Test;
+
+import com.google.protobuf.CodedOutputStream;
+
+public class TestProtoUtil {
+  
+  /**
+   * Values to test encoding as variable length integers
+   */
+  private static final int[] TEST_VINT_VALUES = new int[] {
+    0, 1, -1, 127, 128, 129, 255, 256, 257,
+    0x1234, -0x1234,
+    0x123456, -0x123456,
+    0x12345678, -0x12345678
+  };
+
+  /**
+   * Test that readRawVarint32 is compatible with the varints encoded
+   * by ProtoBuf's CodedOutputStream.
+   */
+  @Test
+  public void testVarInt() throws IOException {
+    // Test a few manufactured values
+    for (int value : TEST_VINT_VALUES) {
+      doVarIntTest(value);
+    }
+    // Check 1-bits at every bit position
+    for (int i = 1; i != 0; i <<= 1) {
+      doVarIntTest(i);
+      doVarIntTest(-i);
+      doVarIntTest(i - 1);
+      doVarIntTest(~i);
+    }
+  }
+  
+  private void doVarIntTest(int value) throws IOException {
+    ByteArrayOutputStream baos = new ByteArrayOutputStream();
+    CodedOutputStream cout = CodedOutputStream.newInstance(baos);
+    cout.writeRawVarint32(value);
+    cout.flush();
+
+    DataInputStream dis = new DataInputStream(
+        new ByteArrayInputStream(baos.toByteArray()));
+    assertEquals(value, ProtoUtil.readRawVarint32(dis));
+  }
+}
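
For context (not part of the commit): the values above exercise protobuf's base-128 varint encoding, in which each byte carries seven payload bits, least-significant group first, and the high bit marks a continuation; because CodedOutputStream.writeRawVarint32 shifts unsigned, even negative ints fit in five bytes. A hypothetical stand-alone decoder with the behaviour the test expects from ProtoUtil.readRawVarint32 might look like:

    // Sketch of a base-128 varint32 decoder (little-endian groups of 7 bits).
    static int decodeVarint32(java.io.InputStream in) throws java.io.IOException {
      int result = 0;
      for (int shift = 0; shift <= 28; shift += 7) {   // at most 5 bytes for 32 bits
        int b = in.read();
        if (b < 0) {
          throw new java.io.EOFException("Stream ended mid-varint");
        }
        result |= (b & 0x7f) << shift;                 // low 7 bits are payload
        if ((b & 0x80) == 0) {                         // high bit clear: last byte
          return result;
        }
      }
      throw new java.io.IOException("Malformed varint32: too many bytes");
    }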