
merge trunk into HDFS-4949 branch

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/HDFS-4949@1514105 13f79535-47bb-0310-9956-ffa450edef68
Andrew Wang, 11 years ago
Commit dd00bb71aa
77 changed files with 2605 additions and 326 deletions
  1. BUILDING.txt (+11, -1)
  2. dev-support/test-patch.sh (+40, -9)
  3. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java (+35, -8)
  4. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java (+41, -10)
  5. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java (+7, -1)
  6. hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/PlatformName.java (+10, -9)
  7. hadoop-common-project/hadoop-common/CHANGES.txt (+37, -3)
  8. hadoop-common-project/hadoop-common/pom.xml (+15, -0)
  9. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java (+11, -8)
  10. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java (+31, -4)
  11. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java (+5, -35)
  12. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java (+67, -7)
  13. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java (+167, -0)
  14. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java (+5, -89)
  15. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java (+4, -2)
  16. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java (+4, -1)
  17. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java (+2, -2)
  18. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java (+12, -6)
  19. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java (+48, -8)
  20. hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java (+13, -1)
  21. hadoop-common-project/hadoop-common/src/main/native/native.vcxproj (+26, -2)
  22. hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c (+46, -7)
  23. hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c (+31, -3)
  24. hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h (+4, -0)
  25. hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c (+14, -26)
  26. hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties (+1, -0)
  27. hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h (+3, -1)
  28. hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c (+48, -0)
  29. hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm (+1, -1)
  30. hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm (+42, -19)
  31. hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm (+1, -1)
  32. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java (+44, -4)
  33. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java (+17, -2)
  34. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java (+122, -0)
  35. hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java (+7, -8)
  36. hadoop-common-project/hadoop-common/src/test/resources/testConf.xml (+10, -2)
  37. hadoop-common-project/hadoop-minikdc/pom.xml (+55, -0)
  38. hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java (+42, -0)
  39. hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java (+121, -0)
  40. hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/KerberosSecurityTestcase.java (+86, -0)
  41. hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java (+534, -0)
  42. hadoop-common-project/hadoop-minikdc/src/main/resources/log4j.properties (+31, -0)
  43. hadoop-common-project/hadoop-minikdc/src/main/resources/minikdc-krb5.conf (+25, -0)
  44. hadoop-common-project/hadoop-minikdc/src/main/resources/minikdc.ldiff (+47, -0)
  45. hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java (+163, -0)
  46. hadoop-common-project/pom.xml (+1, -0)
  47. hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt (+11, -0)
  48. hadoop-hdfs-project/hadoop-hdfs/pom.xml (+8, -0)
  49. hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml (+2, -0)
  50. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java (+2, -1)
  51. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java (+8, -3)
  52. hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java (+34, -5)
  53. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java (+3, -2)
  54. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java (+2, -1)
  55. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java (+218, -0)
  56. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java (+57, -0)
  57. hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java (+28, -0)
  58. hadoop-mapreduce-project/CHANGES.txt (+3, -0)
  59. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml (+2, -0)
  60. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml (+2, -0)
  61. hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java (+9, -7)
  62. hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java (+30, -4)
  63. hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java (+3, -4)
  64. hadoop-project-dist/pom.xml (+7, -0)
  65. hadoop-project/pom.xml (+23, -3)
  66. hadoop-yarn-project/CHANGES.txt (+13, -0)
  67. hadoop-yarn-project/hadoop-yarn/README (+1, -7)
  68. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml (+2, -0)
  69. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java (+8, -0)
  70. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml (+2, -0)
  71. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml (+2, -0)
  72. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java (+4, -6)
  73. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java (+1, -1)
  74. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java (+20, -0)
  75. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java (+2, -0)
  76. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java (+1, -2)
  77. hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java (+10, -0)

+ 11 - 1
BUILDING.txt

@@ -7,7 +7,7 @@ Requirements:
 * JDK 1.6
 * Maven 3.0
 * Findbugs 1.3.9 (if running findbugs)
-* ProtocolBuffer 2.4.1+ (for MapReduce and HDFS)
+* ProtocolBuffer 2.5.0
 * CMake 2.6 or newer (if compiling native code)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 
@@ -99,6 +99,16 @@ level once; and then work from the submodule. Keep in mind that SNAPSHOTs
 time out after a while, using the Maven '-nsu' will stop Maven from trying
 to update SNAPSHOTs from external repos.
 
+----------------------------------------------------------------------------------
+Protocol Buffer compiler
+
+The version of Protocol Buffer compiler, protoc, must match the version of the
+protobuf JAR.
+
+If you have multiple versions of protoc in your system, you can set in your 
+build shell the HADOOP_PROTOC_PATH environment variable to point to the one you 
+want to use for the Hadoop build. If you don't define this environment variable,
+protoc is looked up in the PATH.
 ----------------------------------------------------------------------------------
 Importing projects to eclipse
 

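For reference, a minimal sketch of using the HADOOP_PROTOC_PATH override documented above (not part of this commit; the install path and the Maven invocation below are illustrative assumptions):

    # Hypothetical location of a protoc 2.5.0 install; adjust to your system.
    export HADOOP_PROTOC_PATH=/opt/protobuf-2.5.0/bin/protoc

    # The compiler version must match the protobuf JAR version (2.5.0 here).
    "$HADOOP_PROTOC_PATH" --version    # expected: libprotoc 2.5.0

    # Build as usual; per BUILDING.txt, protoc is taken from this variable
    # instead of being looked up on the PATH.
    mvn clean install -DskipTests
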
+ 40 - 9
dev-support/test-patch.sh

@@ -426,7 +426,8 @@ checkJavadocWarnings () {
   echo "There appear to be $javadocWarnings javadoc warnings generated by the patched build."
 
   #There are 11 warnings that are caused by things that are caused by using sun internal APIs.
-  OK_JAVADOC_WARNINGS=11;
+  #There are 2 warnings that are caused by the Apache DS Dn class used in MiniKdc.
+  OK_JAVADOC_WARNINGS=13;
   ### if current warnings greater than OK_JAVADOC_WARNINGS
   if [[ $javadocWarnings -ne $OK_JAVADOC_WARNINGS ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
@@ -731,32 +732,62 @@ of hadoop-common prior to running the unit tests in $ordered_modules"
           fi
       fi
   fi
+  failed_test_builds=""
+  test_timeouts=""
   for module in $ordered_modules; do
     cd $module
+    module_suffix=`basename ${module}`
+    test_logfile=$PATCH_DIR/testrun_${module_suffix}.txt
     echo "  Running tests in $module"
     echo "  $MVN clean install -fn $NATIVE_PROFILE $REQUIRE_TEST_LIB_HADOOP -D${PROJECT_NAME}PatchProcess"
-    $MVN clean install -fn $NATIVE_PROFILE $REQUIRE_TEST_LIB_HADOOP -D${PROJECT_NAME}PatchProcess
+    $MVN clean install -fae $NATIVE_PROFILE $REQUIRE_TEST_LIB_HADOOP -D${PROJECT_NAME}PatchProcess > $test_logfile 2>&1
+    test_build_result=$?
+    cat $test_logfile
+    module_test_timeouts=`$AWK '/^Running / { if (last) { print last } last=$2 } /^Tests run: / { last="" }' $test_logfile`
+    if [[ -n "$module_test_timeouts" ]] ; then
+      test_timeouts="$test_timeouts
+$module_test_timeouts"
+    fi
     module_failed_tests=`find . -name 'TEST*.xml' | xargs $GREP  -l -E "<failure|<error" | sed -e "s|.*target/surefire-reports/TEST-|                  |g" | sed -e "s|\.xml||g"`
-    # With -fn mvn always exits with a 0 exit code.  Because of this we need to
-    # find the errors instead of using the exit code.  We assume that if the build
-    # failed a -1 is already given for that case
     if [[ -n "$module_failed_tests" ]] ; then
       failed_tests="${failed_tests}
 ${module_failed_tests}"
     fi
+    if [[ $test_build_result != 0 && -z "$module_failed_tests" && -z "$module_test_timeouts" ]] ; then
+      failed_test_builds="$module $failed_test_builds"
+    fi
     cd -
   done
+  result=0
+  comment_prefix="    {color:red}-1 core tests{color}."
   if [[ -n "$failed_tests" ]] ; then
     JIRA_COMMENT="$JIRA_COMMENT
 
-    {color:red}-1 core tests{color}.  The patch failed these unit tests in $modules:
+$comment_prefix  The patch failed these unit tests in $modules:
 $failed_tests"
-    return 1
+    comment_prefix="                                    "
+    result=1
   fi
-  JIRA_COMMENT="$JIRA_COMMENT
+  if [[ -n "$test_timeouts" ]] ; then
+    JIRA_COMMENT="$JIRA_COMMENT
+
+$comment_prefix  The following test timeouts occurred in $modules:
+$test_timeouts"
+    comment_prefix="                                    "
+    result=1
+  fi
+  if [[ -n "$failed_test_builds" ]] ; then
+    JIRA_COMMENT="$JIRA_COMMENT
+
+$comment_prefix  The test build failed in $failed_test_builds"
+    result=1
+  fi
+  if [[ $result == 0 ]] ; then
+    JIRA_COMMENT="$JIRA_COMMENT
 
     {color:green}+1 core tests{color}.  The patch passed unit tests in $modules."
-  return 0
+  fi
+  return $result
 }
 
 ###############################################################################

+ 35 - 8
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -37,6 +37,8 @@ import java.security.PrivilegedExceptionAction;
 import java.util.HashMap;
 import java.util.Map;
 
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
 /**
  * The {@link KerberosAuthenticator} implements the Kerberos SPNEGO authentication sequence.
  * <p/>
@@ -75,15 +77,31 @@ public class KerberosAuthenticator implements Authenticator {
 
     private static final String OS_LOGIN_MODULE_NAME;
     private static final boolean windows = System.getProperty("os.name").startsWith("Windows");
+    private static final boolean is64Bit = System.getProperty("os.arch").contains("64");
+    private static final boolean aix = System.getProperty("os.name").equals("AIX");
 
-    static {
-      if (windows) {
-        OS_LOGIN_MODULE_NAME = "com.sun.security.auth.module.NTLoginModule";
+    /* Return the OS login module class name */
+    private static String getOSLoginModuleName() {
+      if (IBM_JAVA) {
+        if (windows) {
+          return is64Bit ? "com.ibm.security.auth.module.Win64LoginModule"
+              : "com.ibm.security.auth.module.NTLoginModule";
+        } else if (aix) {
+          return is64Bit ? "com.ibm.security.auth.module.AIX64LoginModule"
+              : "com.ibm.security.auth.module.AIXLoginModule";
+        } else {
+          return "com.ibm.security.auth.module.LinuxLoginModule";
+        }
       } else {
-        OS_LOGIN_MODULE_NAME = "com.sun.security.auth.module.UnixLoginModule";
+        return windows ? "com.sun.security.auth.module.NTLoginModule"
+            : "com.sun.security.auth.module.UnixLoginModule";
       }
     }
 
+    static {
+      OS_LOGIN_MODULE_NAME = getOSLoginModuleName();
+    }
+
     private static final AppConfigurationEntry OS_SPECIFIC_LOGIN =
       new AppConfigurationEntry(OS_LOGIN_MODULE_NAME,
                                 AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
@@ -92,13 +110,22 @@ public class KerberosAuthenticator implements Authenticator {
     private static final Map<String, String> USER_KERBEROS_OPTIONS = new HashMap<String, String>();
 
     static {
-      USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
-      USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
-      USER_KERBEROS_OPTIONS.put("renewTGT", "true");
       String ticketCache = System.getenv("KRB5CCNAME");
+      if (IBM_JAVA) {
+        USER_KERBEROS_OPTIONS.put("useDefaultCcache", "true");
+      } else {
+        USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
+        USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
+      }
       if (ticketCache != null) {
-        USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
+        if (IBM_JAVA) {
+          // The first value searched when "useDefaultCcache" is used.
+          System.setProperty("KRB5CCNAME", ticketCache);
+        } else {
+          USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
+        }
       }
+      USER_KERBEROS_OPTIONS.put("renewTGT", "true");
     }
 
     private static final AppConfigurationEntry USER_KERBEROS_LOGIN =

+ 41 - 10
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -21,6 +21,7 @@ import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSCredential;
 import org.ietf.jgss.GSSManager;
+import org.ietf.jgss.Oid;
 import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
@@ -44,6 +45,8 @@ import java.util.Map;
 import java.util.Properties;
 import java.util.Set;
 
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
 /**
  * The {@link KerberosAuthenticationHandler} implements the Kerberos SPNEGO authentication mechanism for HTTP.
  * <p/>
@@ -77,18 +80,33 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
     @Override
     public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
       Map<String, String> options = new HashMap<String, String>();
-      options.put("keyTab", keytab);
-      options.put("principal", principal);
-      options.put("useKeyTab", "true");
-      options.put("storeKey", "true");
-      options.put("doNotPrompt", "true");
-      options.put("useTicketCache", "true");
-      options.put("renewTGT", "true");
+      if (IBM_JAVA) {
+        options.put("useKeytab",
+            keytab.startsWith("file://") ? keytab : "file://" + keytab);
+        options.put("principal", principal);
+        options.put("credsType", "acceptor");
+      } else {
+        options.put("keyTab", keytab);
+        options.put("principal", principal);
+        options.put("useKeyTab", "true");
+        options.put("storeKey", "true");
+        options.put("doNotPrompt", "true");
+        options.put("useTicketCache", "true");
+        options.put("renewTGT", "true");
+        options.put("isInitiator", "false");
+      }
       options.put("refreshKrb5Config", "true");
-      options.put("isInitiator", "false");
       String ticketCache = System.getenv("KRB5CCNAME");
       if (ticketCache != null) {
-        options.put("ticketCache", ticketCache);
+        if (IBM_JAVA) {
+          options.put("useDefaultCcache", "true");
+          // The first value searched when "useDefaultCcache" is used.
+          System.setProperty("KRB5CCNAME", ticketCache);
+          options.put("renewTGT", "true");
+          options.put("credsType", "both");
+        } else {
+          options.put("ticketCache", ticketCache);
+        }
       }
       if (LOG.isDebugEnabled()) {
         options.put("debug", "true");
@@ -294,8 +312,18 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
           public AuthenticationToken run() throws Exception {
             AuthenticationToken token = null;
             GSSContext gssContext = null;
+            GSSCredential gssCreds = null;
             try {
-              gssContext = gssManager.createContext((GSSCredential) null);
+              if (IBM_JAVA) {
+                // IBM JDK needs non-null credentials to be passed to createContext here, with
+                // SPNEGO mechanism specified, otherwise JGSS will use its default mechanism
+                // only, which is Kerberos V5.
+                gssCreds = gssManager.createCredential(null, GSSCredential.INDEFINITE_LIFETIME,
+                    new Oid[]{KerberosUtil.getOidInstance("GSS_SPNEGO_MECH_OID"),
+                        KerberosUtil.getOidInstance("GSS_KRB5_MECH_OID")},
+                    GSSCredential.ACCEPT_ONLY);
+              }
+              gssContext = gssManager.createContext(gssCreds);
               byte[] serverToken = gssContext.acceptSecContext(clientToken, 0, clientToken.length);
               if (serverToken != null && serverToken.length > 0) {
                 String authenticate = base64.encodeToString(serverToken);
@@ -317,6 +345,9 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
               if (gssContext != null) {
                 gssContext.dispose();
               }
+              if (gssCreds != null) {
+                gssCreds.dispose();
+              }
             }
             return token;
           }

+ 7 - 1
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java

@@ -27,6 +27,8 @@ import java.util.Locale;
 import org.ietf.jgss.GSSException;
 import org.ietf.jgss.Oid;
 
+import static org.apache.hadoop.util.PlatformName.IBM_JAVA;
+
 public class KerberosUtil {
 
   /* Return the Kerberos login module name */
@@ -40,7 +42,11 @@ public class KerberosUtil {
       throws ClassNotFoundException, GSSException, NoSuchFieldException,
       IllegalAccessException {
     Class<?> oidClass;
-    if (System.getProperty("java.vendor").contains("IBM")) {
+    if (IBM_JAVA) {
+      if ("NT_GSS_KRB5_PRINCIPAL".equals(oidName)) {
+        // IBM JDK GSSUtil class does not have field for krb5 principal oid
+        return new Oid("1.2.840.113554.1.2.2.1");
+      }
       oidClass = Class.forName("com.ibm.security.jgss.GSSUtil");
     } else {
       oidClass = Class.forName("sun.security.jgss.GSSUtil");

+ 10 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/PlatformName.java → hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/util/PlatformName.java

@@ -22,32 +22,33 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * A helper class for getting build-info of the java-vm. 
- * 
+ * A helper class for getting build-info of the java-vm.
+ *
  */
 @InterfaceAudience.LimitedPrivate({"HBase"})
 @InterfaceStability.Unstable
 public class PlatformName {
   /**
-   * The complete platform 'name' to identify the platform as 
+   * The complete platform 'name' to identify the platform as
    * per the java-vm.
    */
   public static final String PLATFORM_NAME =
-      (Shell.WINDOWS ? System.getenv("os") : System.getProperty("os.name"))
+      (System.getProperty("os.name").startsWith("Windows")
+      ? System.getenv("os") : System.getProperty("os.name"))
       + "-" + System.getProperty("os.arch")
       + "-" + System.getProperty("sun.arch.data.model");
-  
+
   /**
-   * The java vendor name used in this platform. 
+   * The java vendor name used in this platform.
    */
   public static final String JAVA_VENDOR_NAME = System.getProperty("java.vendor");
 
   /**
-   * A public static variable to indicate the current java vendor is 
-   * IBM java or not. 
+   * A public static variable to indicate the current java vendor is
+   * IBM java or not.
    */
   public static final boolean IBM_JAVA = JAVA_VENDOR_NAME.contains("IBM");
-  
+
   public static void main(String[] args) {
     System.out.println(PLATFORM_NAME);
   }

+ 37 - 3
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -56,9 +56,6 @@ Trunk (Unreleased)
     HADOOP-8719. Workaround for kerberos-related log errors upon running any
     hadoop command on OSX. (Jianbin Wei via harsh)
 
-    HADOOP-8814. Replace string equals "" by String#isEmpty().
-    (Brandon Li via suresh)
-
     HADOOP-8588. SerializationFactory shouldn't throw a
     NullPointerException if the serializations list is empty.
     (Sho Shimauchi via harsh)
@@ -271,6 +268,15 @@ Trunk (Unreleased)
     HADOOP-9433 TestLocalFileSystem#testHasFileDescriptor leaks file handle
     (Chris Nauroth via sanjay)
 
+    HADOOP-9583. test-patch gives +1 despite build failure when running tests.
+    (jlowe via kihwal)
+
+    HADOOP-9847. TestGlobPath symlink tests fail to cleanup properly.
+    (cmccabe via wang)
+
+    HADOOP-9740. Fix FsShell '-text' command to be able to read Avro
+    files stored in HDFS and other filesystems. (Allan Yan via cutting)
+
   OPTIMIZATIONS
 
     HADOOP-7761. Improve the performance of raw comparisons. (todd)
@@ -285,6 +291,9 @@ Release 2.3.0 - UNRELEASED
 
   IMPROVEMENTS
 
+    HADOOP 9871. Fix intermittent findbugs warnings in DefaultMetricsSystem.
+    (Junping Du via llu)
+
     HADOOP-9319. Update bundled LZ4 source to r99. (Binglin Chang via llu)
 
     HADOOP-9241. DU refresh interval is not configurable (harsh)
@@ -304,6 +313,9 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9758.  Provide configuration option for FileSystem/FileContext
     symlink resolution.  (Andrew Wang via Colin Patrick McCabe)
 
+    HADOOP-9848. Create a MiniKDC for use with security testing. 
+    (ywskycn via tucu)
+
   OPTIMIZATIONS
 
     HADOOP-9748. Reduce blocking on UGI.ensureInitialized (daryn)
@@ -319,6 +331,10 @@ Release 2.3.0 - UNRELEASED
     HADOOP-9817. FileSystem#globStatus and FileContext#globStatus need to work
     with symlinks. (Colin Patrick McCabe via Andrew Wang)
 
+    HADOOP-9652.  RawLocalFs#getFileLinkStatus does not fill in the link owner
+    and mode.  (Andrew Wang via Colin Patrick McCabe)
+
+
 Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -326,6 +342,8 @@ Release 2.1.1-beta - UNRELEASED
   NEW FEATURES
 
   IMPROVEMENTS
+
+    HADOOP-9446. Support Kerberos SPNEGO for IBM JDK. (Yu Gao via llu)
 
     HADOOP-9787. ShutdownHelper util to shutdown threads and threadpools.
     (Karthik Kambatla via Sandy Ryza)
@@ -340,6 +358,11 @@ Release 2.1.1-beta - UNRELEASED
 
     HADOOP-9789. Support server advertised kerberos principals (daryn)
 
+    HADOOP-8814. Replace string equals "" by String#isEmpty().
+    (Brandon Li via suresh)
+
+    HADOOP-9802. Support Snappy codec on Windows. (cnauroth)
+
   OPTIMIZATIONS
 
   BUG FIXES
@@ -361,6 +384,13 @@ Release 2.1.1-beta - UNRELEASED
     HADOOP-9675. use svn:eol-style native for html to prevent line ending
     issues (Colin Patrick McCabe)
 
+    HADOOP-9757. Har metadata cache can grow without limit (Cristina Abad via daryn)
+
+    HADOOP-9857. Tests block and sometimes timeout on Windows due to invalid
+    entropy source. (cnauroth)
+
+    HADOOP-9381. Document dfs cp -f option. (Keegan Witt, suresh via suresh)
+
 Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES
@@ -558,6 +588,10 @@ Release 2.1.0-beta - 2013-08-06
     HADOOP-9150. Avoid unnecessary DNS resolution attempts for logical URIs
     (todd)
 
+    HADOOP-9845. Update protobuf to 2.5 from 2.4.x. (tucu)
+
+    HADOOP-9872. Improve protoc version handling and detection. (tucu)
+
   BUG FIXES
 
     HADOOP-9294. GetGroupsTestBase fails on Windows. (Chris Nauroth via suresh)

+ 15 - 0
hadoop-common-project/hadoop-common/pom.xml

@@ -308,6 +308,8 @@
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/src/main/proto</param>
               </imports>
@@ -336,6 +338,8 @@
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/src/test/proto</param>
               </imports>
@@ -586,6 +590,13 @@
           <family>Windows</family>
         </os>
       </activation>
+      <properties>
+        <snappy.prefix></snappy.prefix>
+        <snappy.lib></snappy.lib>
+        <snappy.include></snappy.include>
+        <require.snappy>false</require.snappy>
+        <bundle.snappy.in.bin>true</bundle.snappy.in.bin>
+      </properties>
       <build>
         <plugins>
           <plugin>
@@ -670,6 +681,10 @@
                     <argument>/nologo</argument>
                     <argument>/p:Configuration=Release</argument>
                     <argument>/p:OutDir=${project.build.directory}/bin/</argument>
+                    <argument>/p:CustomSnappyPrefix=${snappy.prefix}</argument>
+                    <argument>/p:CustomSnappyLib=${snappy.lib}</argument>
+                    <argument>/p:CustomSnappyInclude=${snappy.include}</argument>
+                    <argument>/p:RequireSnappy=${require.snappy}</argument>
                   </arguments>
                 </configuration>
               </execution>

+ 11 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/DelegateToFileSystem.java

@@ -113,7 +113,14 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
 
   @Override
   public FileStatus getFileLinkStatus(final Path f) throws IOException {
-    return getFileStatus(f);
+    FileStatus status = fsImpl.getFileLinkStatus(f);
+    // FileSystem#getFileLinkStatus qualifies the link target
+    // AbstractFileSystem needs to return it plain since it's qualified
+    // in FileContext, so re-get and set the plain target
+    if (status.isSymlink()) {
+      status.setSymlink(fsImpl.getLinkTarget(f));
+    }
+    return status;
   }
 
   @Override
@@ -199,22 +206,18 @@ public abstract class DelegateToFileSystem extends AbstractFileSystem {
 
   @Override
   public boolean supportsSymlinks() {
-    return false;
+    return fsImpl.supportsSymlinks();
   }  
   
   @Override
   public void createSymlink(Path target, Path link, boolean createParent) 
       throws IOException { 
-    throw new IOException("File system does not support symlinks");
+    fsImpl.createSymlink(target, link, createParent);
   } 
   
   @Override
   public Path getLinkTarget(final Path f) throws IOException {
-    /* We should never get here. Any file system that threw an 
-     * UnresolvedLinkException, causing this function to be called,
-     * should override getLinkTarget. 
-     */
-    throw new AssertionError();
+    return fsImpl.getLinkTarget(f);
   }
 
   @Override //AbstractFileSystem

+ 31 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HarFileSystem.java

@@ -24,11 +24,12 @@ import java.net.URI;
 import java.net.URISyntaxException;
 import java.net.URLDecoder;
 import java.util.ArrayList;
+import java.util.Collections;
 import java.util.List;
+import java.util.LinkedHashMap;
 import java.util.Map;
 import java.util.TreeMap;
 import java.util.HashMap;
-import java.util.concurrent.ConcurrentHashMap;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -56,10 +57,12 @@ public class HarFileSystem extends FilterFileSystem {
 
   private static final Log LOG = LogFactory.getLog(HarFileSystem.class);
 
+  public static final String METADATA_CACHE_ENTRIES_KEY = "fs.har.metadatacache.entries";
+  public static final int METADATA_CACHE_ENTRIES_DEFAULT = 10;
+
   public static final int VERSION = 3;
 
-  private static final Map<URI, HarMetaData> harMetaCache =
-      new ConcurrentHashMap<URI, HarMetaData>();
+  private static Map<URI, HarMetaData> harMetaCache;
 
   // uri representation of this Har filesystem
   private URI uri;
@@ -98,7 +101,14 @@ public class HarFileSystem extends FilterFileSystem {
   public HarFileSystem(FileSystem fs) {
     super(fs);
   }
-  
+ 
+  private synchronized void initializeMetadataCache(Configuration conf) {
+    if (harMetaCache == null) {
+      int cacheSize = conf.getInt(METADATA_CACHE_ENTRIES_KEY, METADATA_CACHE_ENTRIES_DEFAULT);
+      harMetaCache = Collections.synchronizedMap(new LruCache<URI, HarMetaData>(cacheSize));
+    }
+  }
+ 
   /**
    * Initialize a Har filesystem per har archive. The 
    * archive home directory is the top level directory
@@ -114,6 +124,9 @@ public class HarFileSystem extends FilterFileSystem {
    */
   @Override
   public void initialize(URI name, Configuration conf) throws IOException {
+    // initialize the metadata cache, if needed
+    initializeMetadataCache(conf);
+
     // decode the name
     URI underLyingURI = decodeHarURI(name, conf);
     // we got the right har Path- now check if this is 
@@ -1117,4 +1130,18 @@ public class HarFileSystem extends FilterFileSystem {
   HarMetaData getMetadata() {
     return metadata;
   }
+
+  private static class LruCache<K, V> extends LinkedHashMap<K, V> {
+    private final int MAX_ENTRIES;
+
+    public LruCache(int maxEntries) {
+        super(maxEntries + 1, 1.0f, true);
+        MAX_ENTRIES = maxEntries;
+    }
+
+    @Override
+    protected boolean removeEldestEntry(Map.Entry<K, V> eldest) {
+        return size() > MAX_ENTRIES;
+    }
+  }
 }

+ 5 - 35
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/HardLink.java

@@ -41,15 +41,6 @@ import org.apache.hadoop.util.Shell;
  */
 public class HardLink { 
 
-  public enum OSType {
-    OS_TYPE_UNIX,
-    OS_TYPE_WIN,
-    OS_TYPE_SOLARIS,
-    OS_TYPE_MAC,
-    OS_TYPE_FREEBSD
-  }
-  
-  public static OSType osType;
   private static HardLinkCommandGetter getHardLinkCommand;
   
   public final LinkStats linkStats; //not static
@@ -57,19 +48,18 @@ public class HardLink {
   //initialize the command "getters" statically, so can use their 
   //methods without instantiating the HardLink object
   static { 
-    osType = getOSType();
-    if (osType == OSType.OS_TYPE_WIN) {
+    if (Shell.WINDOWS) {
       // Windows
       getHardLinkCommand = new HardLinkCGWin();
     } else {
-      // Unix
+      // Unix or Linux
       getHardLinkCommand = new HardLinkCGUnix();
       //override getLinkCountCommand for the particular Unix variant
       //Linux is already set as the default - {"stat","-c%h", null}
-      if (osType == OSType.OS_TYPE_MAC || osType == OSType.OS_TYPE_FREEBSD) {
+      if (Shell.MAC || Shell.FREEBSD) {
         String[] linkCountCmdTemplate = {"/usr/bin/stat","-f%l", null};
         HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);
-      } else if (osType == OSType.OS_TYPE_SOLARIS) {
+      } else if (Shell.SOLARIS) {
        String[] linkCountCmdTemplate = {"ls","-l", null};
        HardLinkCGUnix.setLinkCountCmdTemplate(linkCountCmdTemplate);        
       }
@@ -80,26 +70,6 @@ public class HardLink {
     linkStats = new LinkStats();
   }
   
-  static private OSType getOSType() {
-    String osName = System.getProperty("os.name");
-    if (Shell.WINDOWS) {
-      return OSType.OS_TYPE_WIN;
-    }
-    else if (osName.contains("SunOS") 
-            || osName.contains("Solaris")) {
-       return OSType.OS_TYPE_SOLARIS;
-    }
-    else if (osName.contains("Mac")) {
-       return OSType.OS_TYPE_MAC;
-    }
-    else if (osName.contains("FreeBSD")) {
-       return OSType.OS_TYPE_FREEBSD;
-    }
-    else {
-      return OSType.OS_TYPE_UNIX;
-    }
-  }
-  
   /**
    * This abstract class bridges the OS-dependent implementations of the 
    * needed functionality for creating hardlinks and querying link counts.
@@ -548,7 +518,7 @@ public class HardLink {
       if (inpMsg == null || exitValue != 0) {
         throw createIOException(fileName, inpMsg, errMsg, exitValue, null);
       }
-      if (osType == OSType.OS_TYPE_SOLARIS) {
+      if (Shell.SOLARIS) {
         String[] result = inpMsg.split("\\s+");
         return Integer.parseInt(result[1]);
       } else {

+ 67 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/RawLocalFileSystem.java

@@ -51,6 +51,7 @@ import org.apache.hadoop.util.StringUtils;
 public class RawLocalFileSystem extends FileSystem {
   static final URI NAME = URI.create("file:///");
   private Path workingDir;
+  private static final boolean useDeprecatedFileStatus = !Stat.isAvailable();
   
   public RawLocalFileSystem() {
     workingDir = getInitialWorkingDirectory();
@@ -385,8 +386,11 @@ public class RawLocalFileSystem extends FileSystem {
       throw new FileNotFoundException("File " + f + " does not exist");
     }
     if (localf.isFile()) {
+      if (!useDeprecatedFileStatus) {
+        return new FileStatus[] { getFileStatus(f) };
+      }
       return new FileStatus[] {
-        new RawLocalFileStatus(localf, getDefaultBlockSize(f), this) };
+        new DeprecatedRawLocalFileStatus(localf, getDefaultBlockSize(f), this)};
     }
 
     File[] names = localf.listFiles();
@@ -516,15 +520,22 @@ public class RawLocalFileSystem extends FileSystem {
   
   @Override
   public FileStatus getFileStatus(Path f) throws IOException {
+    return getFileLinkStatusInternal(f, true);
+  }
+
+  @Deprecated
+  private FileStatus deprecatedGetFileStatus(Path f) throws IOException {
     File path = pathToFile(f);
     if (path.exists()) {
-      return new RawLocalFileStatus(pathToFile(f), getDefaultBlockSize(f), this);
+      return new DeprecatedRawLocalFileStatus(pathToFile(f),
+          getDefaultBlockSize(f), this);
     } else {
       throw new FileNotFoundException("File " + f + " does not exist");
     }
   }
 
-  static class RawLocalFileStatus extends FileStatus {
+  @Deprecated
+  static class DeprecatedRawLocalFileStatus extends FileStatus {
     /* We can add extra fields here. It breaks at least CopyFiles.FilePair().
      * We recognize if the information is already loaded by check if
      * onwer.equals("").
@@ -533,7 +544,7 @@ public class RawLocalFileSystem extends FileSystem {
       return !super.getOwner().isEmpty(); 
     }
     
-    RawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) { 
+    DeprecatedRawLocalFileStatus(File f, long defaultBlockSize, FileSystem fs) {
      super(f.length(), f.isDirectory(), 1, defaultBlockSize,
          f.lastModified(), new Path(f.getPath()).makeQualified(fs.getUri(),
            fs.getWorkingDirectory()));
@@ -699,7 +710,7 @@ public class RawLocalFileSystem extends FileSystem {
    */
   @Override
   public FileStatus getFileLinkStatus(final Path f) throws IOException {
-    FileStatus fi = getFileLinkStatusInternal(f);
+    FileStatus fi = getFileLinkStatusInternal(f, false);
     // getFileLinkStatus is supposed to return a symlink with a
     // qualified path
     if (fi.isSymlink()) {
@@ -710,7 +721,35 @@ public class RawLocalFileSystem extends FileSystem {
     return fi;
   }
 
-  private FileStatus getFileLinkStatusInternal(final Path f) throws IOException {
+  /**
+   * Public {@link FileStatus} methods delegate to this function, which in turn
+   * either call the new {@link Stat} based implementation or the deprecated
+   * methods based on platform support.
+   * 
+   * @param f Path to stat
+   * @param dereference whether to dereference the final path component if a
+   *          symlink
+   * @return FileStatus of f
+   * @throws IOException
+   */
+  private FileStatus getFileLinkStatusInternal(final Path f,
+      boolean dereference) throws IOException {
+    if (!useDeprecatedFileStatus) {
+      return getNativeFileLinkStatus(f, dereference);
+    } else if (dereference) {
+      return deprecatedGetFileStatus(f);
+    } else {
+      return deprecatedGetFileLinkStatusInternal(f);
+    }
+  }
+
+  /**
+   * Deprecated. Remains for legacy support. Should be removed when {@link Stat}
+   * gains support for Windows and other operating systems.
+   */
+  @Deprecated
+  private FileStatus deprecatedGetFileLinkStatusInternal(final Path f)
+      throws IOException {
     String target = FileUtil.readLink(new File(f.toString()));
 
     try {
@@ -746,10 +785,31 @@ public class RawLocalFileSystem extends FileSystem {
       throw e;
     }
   }
+  /**
+   * Calls out to platform's native stat(1) implementation to get file metadata
+   * (permissions, user, group, atime, mtime, etc). This works around the lack
+   * of lstat(2) in Java 6.
+   * 
+   *  Currently, the {@link Stat} class used to do this only supports Linux
+   *  and FreeBSD, so the old {@link #deprecatedGetFileLinkStatusInternal(Path)}
+   *  implementation (deprecated) remains further OS support is added.
+   *
+   * @param f File to stat
+   * @param dereference whether to dereference symlinks
+   * @return FileStatus of f
+   * @throws IOException
+   */
+  private FileStatus getNativeFileLinkStatus(final Path f,
+      boolean dereference) throws IOException {
+    checkPath(f);
+    Stat stat = new Stat(f, getDefaultBlockSize(f), dereference, this);
+    FileStatus status = stat.getFileStatus();
+    return status;
+  }
 
   @Override
   public Path getLinkTarget(Path f) throws IOException {
-    FileStatus fi = getFileLinkStatusInternal(f);
+    FileStatus fi = getFileLinkStatusInternal(f, false);
     // return an unqualified symlink target
     return fi.getSymlink();
   }

+ 167 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/Stat.java

@@ -0,0 +1,167 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.IOException;
+import java.util.NoSuchElementException;
+import java.util.StringTokenizer;
+
+import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.fs.permission.FsPermission;
+import org.apache.hadoop.util.Shell;
+
+import com.google.common.annotations.VisibleForTesting;
+
+/**
+ * Wrapper for the Unix stat(1) command. Used to workaround the lack of 
+ * lstat(2) in Java 6.
+ */
+@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
+@InterfaceStability.Evolving
+public class Stat extends Shell {
+
+  private final Path original;
+  private final Path qualified;
+  private final Path path;
+  private final long blockSize;
+  private final boolean dereference;
+
+  private FileStatus stat;
+
+  public Stat(Path path, long blockSize, boolean deref, FileSystem fs)
+      throws IOException {
+    super(0L, true);
+    // Original path
+    this.original = path;
+    // Qualify the original and strip out URI fragment via toUri().getPath()
+    Path stripped = new Path(
+        original.makeQualified(fs.getUri(), fs.getWorkingDirectory())
+        .toUri().getPath());
+    // Re-qualify the bare stripped path and store it
+    this.qualified = 
+        stripped.makeQualified(fs.getUri(), fs.getWorkingDirectory());
+    // Strip back down to a plain path
+    this.path = new Path(qualified.toUri().getPath());
+    this.blockSize = blockSize;
+    this.dereference = deref;
+  }
+
+  public FileStatus getFileStatus() throws IOException {
+    run();
+    return stat;
+  }
+
+  /**
+   * Whether Stat is supported on the current platform
+   * @return
+   */
+  public static boolean isAvailable() {
+    if (Shell.LINUX || Shell.FREEBSD) {
+      return true;
+    }
+    return false;
+  }
+
+  @VisibleForTesting
+  FileStatus getFileStatusForTesting() {
+    return stat;
+  }
+
+  @Override
+  protected String[] getExecString() {
+    String derefFlag = "-";
+    if (dereference) {
+      derefFlag = "-L";
+    }
+    if (Shell.LINUX) {
+      return new String[] {
+          "stat", derefFlag + "c", "%s,%F,%Y,%X,%a,%U,%G,%N", path.toString() };
+    } else if (Shell.FREEBSD) {
+      return new String[] {
+          "stat", derefFlag + "f", "%z,%HT,%m,%a,%Op,%Su,%Sg,`link' -> `%Y'",
+          path.toString() };
+    } else {
+      throw new UnsupportedOperationException(
+          "stat is not supported on this platform");
+    }
+  }
+
+  @Override
+  protected void parseExecResult(BufferedReader lines) throws IOException {
+    // Reset stat
+    stat = null;
+
+    String line = lines.readLine();
+    if (line == null) {
+      throw new IOException("Unable to stat path: " + original);
+    }
+    if (line.endsWith("No such file or directory") ||
+        line.endsWith("Not a directory")) {
+      throw new FileNotFoundException("File " + original + " does not exist");
+    }
+    if (line.endsWith("Too many levels of symbolic links")) {
+      throw new IOException("Possible cyclic loop while following symbolic" +
+          " link " + original);
+    }
+    // 6,symbolic link,6,1373584236,1373584236,lrwxrwxrwx,andrew,andrew,`link' -> `target'
+    StringTokenizer tokens = new StringTokenizer(line, ",");
+    try {
+      long length = Long.parseLong(tokens.nextToken());
+      boolean isDir = tokens.nextToken().equalsIgnoreCase("directory") ? true
+          : false;
+      // Convert from seconds to milliseconds
+      long modTime = Long.parseLong(tokens.nextToken())*1000;
+      long accessTime = Long.parseLong(tokens.nextToken())*1000;
+      String octalPerms = tokens.nextToken();
+      // FreeBSD has extra digits beyond 4, truncate them
+      if (octalPerms.length() > 4) {
+        int len = octalPerms.length();
+        octalPerms = octalPerms.substring(len-4, len);
+      }
+      FsPermission perms = new FsPermission(Short.parseShort(octalPerms, 8));
+      String owner = tokens.nextToken();
+      String group = tokens.nextToken();
+      String symStr = tokens.nextToken();
+      // 'notalink'
+      // 'link' -> `target'
+      // '' -> ''
+      Path symlink = null;
+      StringTokenizer symTokens = new StringTokenizer(symStr, "`");
+      symTokens.nextToken();
+      try {
+        String target = symTokens.nextToken();
+        target = target.substring(0, target.length()-1);
+        if (!target.isEmpty()) {
+          symlink = new Path(target);
+        }
+      } catch (NoSuchElementException e) {
+        // null if not a symlink
+      }
+      // Set stat
+      stat = new FileStatus(length, isDir, 1, blockSize, modTime, accessTime,
+          perms, owner, group, symlink, qualified);
+    } catch (NumberFormatException e) {
+      throw new IOException("Unexpected stat output: " + line, e);
+    } catch (NoSuchElementException e) {
+      throw new IOException("Unexpected stat output: " + line, e);
+    }
+  }
+}
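A quick illustration of how this wrapper is meant to be driven. This is a hedged sketch, not code from the patch: the path, the block size and the use of the raw local file system are assumptions made only for the example.

  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.FileStatus;
  import org.apache.hadoop.fs.FileSystem;
  import org.apache.hadoop.fs.Path;
  import org.apache.hadoop.fs.Stat;

  public class StatSketch {
    public static void main(String[] args) throws Exception {
      // Stat shells out to stat(1), so only try it where the patch supports it.
      if (!Stat.isAvailable()) {
        System.err.println("stat(1) parsing is only wired up for Linux and FreeBSD");
        return;
      }
      FileSystem fs = FileSystem.getLocal(new Configuration()).getRawFileSystem();
      // "false" asks for lstat(2) semantics: report the link itself, not its target.
      Stat stat = new Stat(new Path("/tmp/example-link"), 4096L, false, fs);
      FileStatus status = stat.getFileStatus();
      System.out.println(status.isSymlink() + " " + status.getLen());
    }
  }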

+ 5 - 89
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/local/RawLocalFs.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.fs.local;
 
-import java.io.File;
-import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.net.URI;
 import java.net.URISyntaxException;
@@ -28,13 +26,9 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.AbstractFileSystem;
 import org.apache.hadoop.fs.DelegateToFileSystem;
-import org.apache.hadoop.fs.FileStatus;
-import org.apache.hadoop.fs.FileUtil;
 import org.apache.hadoop.fs.FsConstants;
 import org.apache.hadoop.fs.FsServerDefaults;
-import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.RawLocalFileSystem;
-import org.apache.hadoop.fs.permission.FsPermission;
 
 /**
  * The RawLocalFs implementation of AbstractFileSystem.
@@ -72,90 +66,12 @@ public class RawLocalFs extends DelegateToFileSystem {
   public FsServerDefaults getServerDefaults() throws IOException {
     return LocalConfigKeys.getServerDefaults();
   }
-  
-  @Override
-  public boolean supportsSymlinks() {
-    return true;
-  }
-
-  @Override
-  public void createSymlink(Path target, Path link, boolean createParent)
-      throws IOException {
-    final String targetScheme = target.toUri().getScheme();
-    if (targetScheme != null && !"file".equals(targetScheme)) {
-      throw new IOException("Unable to create symlink to non-local file "+
-          "system: "+target.toString());
-    }
-
-    if (createParent) {
-      mkdir(link.getParent(), FsPermission.getDirDefault(), true);
-    }
-
-    // NB: Use createSymbolicLink in java.nio.file.Path once available
-    int result = FileUtil.symLink(target.toString(), link.toString());
-    if (result != 0) {
-      throw new IOException("Error " + result + " creating symlink " +
-          link + " to " + target);
-    }
-  }
 
-  /**
-   * Return a FileStatus representing the given path. If the path refers 
-   * to a symlink return a FileStatus representing the link rather than
-   * the object the link refers to.
-   */
-  @Override
-  public FileStatus getFileLinkStatus(final Path f) throws IOException {
-    String target = FileUtil.readLink(new File(f.toString()));
-    try {
-      FileStatus fs = getFileStatus(f);
-      // If f refers to a regular file or directory      
-      if (target.isEmpty()) {
-        return fs;
-      }
-      // Otherwise f refers to a symlink
-      return new FileStatus(fs.getLen(), 
-          false,
-          fs.getReplication(), 
-          fs.getBlockSize(),
-          fs.getModificationTime(),
-          fs.getAccessTime(),
-          fs.getPermission(),
-          fs.getOwner(),
-          fs.getGroup(),
-          new Path(target),
-          f);
-    } catch (FileNotFoundException e) {
-      /* The exists method in the File class returns false for dangling 
-       * links so we can get a FileNotFoundException for links that exist.
-       * It's also possible that we raced with a delete of the link. Use
-       * the readBasicFileAttributes method in java.nio.file.attributes 
-       * when available.
-       */
-      if (!target.isEmpty()) {
-        return new FileStatus(0, false, 0, 0, 0, 0, FsPermission.getDefault(), 
-            "", "", new Path(target), f);        
-      }
-      // f refers to a file or directory that does not exist
-      throw e;
-    }
-  }
-  
-   @Override
-   public boolean isValidName(String src) {
-     // Different local file systems have different validation rules.  Skip
-     // validation here and just let the OS handle it.  This is consistent with
-     // RawLocalFileSystem.
-     return true;
-   }
-  
   @Override
-  public Path getLinkTarget(Path f) throws IOException {
-    /* We should never get here. Valid local links are resolved transparently
-     * by the underlying local file system and accessing a dangling link will 
-     * result in an IOException, not an UnresolvedLinkException, so FileContext
-     * should never call this function.
-     */
-    throw new AssertionError();
+  public boolean isValidName(String src) {
+    // Different local file systems have different validation rules. Skip
+    // validation here and just let the OS handle it. This is consistent with
+    // RawLocalFileSystem.
+    return true;
   }
 }

+ 4 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/CopyCommands.java

@@ -133,7 +133,8 @@ class CopyCommands {
       "Copy files that match the file pattern <src> to a\n" +
       "Copy files that match the file pattern <src> to a\n" +
       "destination.  When copying multiple files, the destination\n" +
       "destination.  When copying multiple files, the destination\n" +
       "must be a directory. Passing -p preserves access and\n" +
       "must be a directory. Passing -p preserves access and\n" +
-      "modification times, ownership and the mode.\n";
+      "modification times, ownership and the mode. Passing -f\n" +
+      "overwrites the destination if it already exists.\n";
     
     
     @Override
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
     protected void processOptions(LinkedList<String> args) throws IOException {
@@ -186,7 +187,8 @@ class CopyCommands {
       "into fs. Copying fails if the file already\n" +
       "into fs. Copying fails if the file already\n" +
       "exists, unless the -f flag is given. Passing\n" +
       "exists, unless the -f flag is given. Passing\n" +
       "-p preserves access and modification times,\n" +
       "-p preserves access and modification times,\n" +
-      "ownership and the mode.\n";
+      "ownership and the mode. Passing -f overwrites\n" +
+      "the destination if it already exists.\n";
 
 
     @Override
     @Override
     protected void processOptions(LinkedList<String> args) throws IOException {
     protected void processOptions(LinkedList<String> args) throws IOException {

+ 4 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Display.java

@@ -35,8 +35,10 @@ import org.apache.avro.Schema;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.AvroFSInput;
 import org.apache.hadoop.fs.FSDataInputStream;
 import org.apache.hadoop.fs.FileChecksum;
+import org.apache.hadoop.fs.FileContext;
 import org.apache.hadoop.fs.FileStatus;
 import org.apache.hadoop.fs.Path;
 import org.apache.hadoop.fs.PathIsDirectoryException;
@@ -259,8 +261,9 @@ class Display extends FsCommand {
       pos = 0;
       buffer = new byte[0];
       GenericDatumReader<Object> reader = new GenericDatumReader<Object>();
+      FileContext fc = FileContext.getFileContext(new Configuration());
       fileReader =
-        DataFileReader.openReader(new File(status.getPath().toUri()), reader);
+        DataFileReader.openReader(new AvroFSInput(fc, status.getPath()),reader);
       Schema schema = fileReader.getSchema();
       writer = new GenericDatumWriter<Object>(schema);
       output = new ByteArrayOutputStream();
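The Display change above stops assuming the Avro file lives on local disk: it opens the data through AvroFSInput and a FileContext instead of java.io.File, so any Hadoop filesystem works. A minimal sketch of the same pattern; the path is illustrative and not part of this patch.

  import org.apache.avro.file.DataFileReader;
  import org.apache.avro.file.FileReader;
  import org.apache.avro.generic.GenericDatumReader;
  import org.apache.hadoop.conf.Configuration;
  import org.apache.hadoop.fs.AvroFSInput;
  import org.apache.hadoop.fs.FileContext;
  import org.apache.hadoop.fs.Path;

  public class AvroDumpSketch {
    public static void main(String[] args) throws Exception {
      FileContext fc = FileContext.getFileContext(new Configuration());
      Path path = new Path("/tmp/records.avro"); // hypothetical input file
      FileReader<Object> reader = DataFileReader.openReader(
          new AvroFSInput(fc, path), new GenericDatumReader<Object>());
      try {
        while (reader.hasNext()) {
          System.out.println(reader.next()); // print each record as generic data
        }
      } finally {
        reader.close();
      }
    }
  }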

+ 2 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/lib/DefaultMetricsSystem.java

@@ -46,8 +46,8 @@ public enum DefaultMetricsSystem {
   @VisibleForTesting
   volatile boolean miniClusterMode = false;
   
-  final UniqueNames mBeanNames = new UniqueNames();
-  final UniqueNames sourceNames = new UniqueNames();
+  transient final UniqueNames mBeanNames = new UniqueNames();
+  transient final UniqueNames sourceNames = new UniqueNames();
 
   /**
    * Convenience method to initialize the metrics system

+ 12 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -439,7 +439,6 @@ public class UserGroupInformation {
       } else {
         USER_KERBEROS_OPTIONS.put("doNotPrompt", "true");
         USER_KERBEROS_OPTIONS.put("useTicketCache", "true");
-        USER_KERBEROS_OPTIONS.put("renewTGT", "true");
       }
       String ticketCache = System.getenv("KRB5CCNAME");
       if (ticketCache != null) {
@@ -450,6 +449,7 @@
           USER_KERBEROS_OPTIONS.put("ticketCache", ticketCache);
         }
       }
+      USER_KERBEROS_OPTIONS.put("renewTGT", "true");
       USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
     }
     private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
@@ -465,8 +465,8 @@
         KEYTAB_KERBEROS_OPTIONS.put("doNotPrompt", "true");
         KEYTAB_KERBEROS_OPTIONS.put("useKeyTab", "true");
         KEYTAB_KERBEROS_OPTIONS.put("storeKey", "true");
-        KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
       }
+      KEYTAB_KERBEROS_OPTIONS.put("refreshKrb5Config", "true");
       KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);      
     }
     private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
@@ -627,11 +627,17 @@ public class UserGroupInformation {
     }
     try {
       Map<String,String> krbOptions = new HashMap<String,String>();
-      krbOptions.put("doNotPrompt", "true");
-      krbOptions.put("useTicketCache", "true");
-      krbOptions.put("useKeyTab", "false");
+      if (IBM_JAVA) {
+        krbOptions.put("useDefaultCcache", "true");
+        // The first value searched when "useDefaultCcache" is used.
+        System.setProperty("KRB5CCNAME", ticketCache);
+      } else {
+        krbOptions.put("doNotPrompt", "true");
+        krbOptions.put("useTicketCache", "true");
+        krbOptions.put("useKeyTab", "false");
+        krbOptions.put("ticketCache", ticketCache);
+      }
       krbOptions.put("renewTGT", "false");
       krbOptions.put("renewTGT", "false");
-      krbOptions.put("ticketCache", ticketCache);
       krbOptions.putAll(HadoopConfiguration.BASIC_JAAS_OPTIONS);
       krbOptions.putAll(HadoopConfiguration.BASIC_JAAS_OPTIONS);
       AppConfigurationEntry ace = new AppConfigurationEntry(
       AppConfigurationEntry ace = new AppConfigurationEntry(
           KerberosUtil.getKrb5LoginModuleName(),
           KerberosUtil.getKrb5LoginModuleName(),

+ 48 - 8
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/Shell.java

@@ -58,6 +58,45 @@ abstract public class Shell {
   /** Windows CreateProcess synchronization object */
   public static final Object WindowsProcessLaunchLock = new Object();
 
+  // OSType detection
+
+  public enum OSType {
+    OS_TYPE_LINUX,
+    OS_TYPE_WIN,
+    OS_TYPE_SOLARIS,
+    OS_TYPE_MAC,
+    OS_TYPE_FREEBSD,
+    OS_TYPE_OTHER
+  }
+
+  public static final OSType osType = getOSType();
+
+  static private OSType getOSType() {
+    String osName = System.getProperty("os.name");
+    if (osName.startsWith("Windows")) {
+      return OSType.OS_TYPE_WIN;
+    } else if (osName.contains("SunOS") || osName.contains("Solaris")) {
+      return OSType.OS_TYPE_SOLARIS;
+    } else if (osName.contains("Mac")) {
+      return OSType.OS_TYPE_MAC;
+    } else if (osName.contains("FreeBSD")) {
+      return OSType.OS_TYPE_FREEBSD;
+    } else if (osName.startsWith("Linux")) {
+      return OSType.OS_TYPE_LINUX;
+    } else {
+      // Some other form of Unix
+      return OSType.OS_TYPE_OTHER;
+    }
+  }
+
+  // Helper static vars for each platform
+  public static final boolean WINDOWS = (osType == OSType.OS_TYPE_WIN);
+  public static final boolean SOLARIS = (osType == OSType.OS_TYPE_SOLARIS);
+  public static final boolean MAC     = (osType == OSType.OS_TYPE_MAC);
+  public static final boolean FREEBSD = (osType == OSType.OS_TYPE_FREEBSD);
+  public static final boolean LINUX   = (osType == OSType.OS_TYPE_LINUX);
+  public static final boolean OTHER   = (osType == OSType.OS_TYPE_OTHER);
+
   /** a Unix command to get the current user's groups list */
   public static String[] getGroupsCommand() {
     return (WINDOWS)? new String[]{"cmd", "/c", "groups"}
@@ -282,13 +321,6 @@ abstract public class Shell {
     return exeFile.getCanonicalPath();
   }
 
-  /** Set to true on Windows platforms */
-  public static final boolean WINDOWS /* borrowed from Path.WINDOWS */
-                = System.getProperty("os.name").startsWith("Windows");
-
-  public static final boolean LINUX
-                = System.getProperty("os.name").startsWith("Linux");
-  
   /** a Windows utility to emulate Unix commands */
   public static final String WINUTILS = getWinUtilsPath();
 
@@ -336,6 +368,7 @@
 
   private long    interval;   // refresh interval in msec
   private long    lastTime;   // last time the command was performed
+  final private boolean redirectErrorStream; // merge stdout and stderr
   private Map<String, String> environment; // env for the command execution
   private File dir;
   private Process process; // sub process used to execute the command
@@ -348,13 +381,18 @@ abstract public class Shell {
     this(0L);
   }
   
+  public Shell(long interval) {
+    this(interval, false);
+  }
+
   /**
    * @param interval the minimum duration to wait before re-executing the 
    *        command.
    */
-  public Shell( long interval ) {
+  public Shell(long interval, boolean redirectErrorStream) {
     this.interval = interval;
     this.lastTime = (interval<0) ? 0 : -interval;
+    this.redirectErrorStream = redirectErrorStream;
   }
   
   /** set the environment for the command 
@@ -393,6 +431,8 @@ abstract public class Shell {
     if (dir != null) {
       builder.directory(this.dir);
     }
+
+    builder.redirectErrorStream(redirectErrorStream);
     
     if (Shell.WINDOWS) {
       synchronized (WindowsProcessLaunchLock) {
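The OSType enum and the WINDOWS/SOLARIS/MAC/FREEBSD/LINUX/OTHER helpers added above replace the old string checks against os.name. A hedged sketch of a caller branching on them; the command arrays are illustrative and not taken from Hadoop itself.

  import java.util.Arrays;

  import org.apache.hadoop.util.Shell;

  public class OsTypeSketch {
    public static void main(String[] args) {
      // osType is computed once, from os.name, when the Shell class loads.
      System.out.println("Detected OS type: " + Shell.osType);

      // The boolean helpers mirror the enum, so callers can branch directly.
      String[] listCommand = Shell.WINDOWS
          ? new String[] { "cmd", "/c", "dir" }
          : new String[] { "ls", "-l" };
      System.out.println(Arrays.toString(listCommand));
    }
  }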

+ 13 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/VersionInfo.java

@@ -90,6 +90,10 @@ public class VersionInfo {
       " source checksum " + _getSrcChecksum();
       " source checksum " + _getSrcChecksum();
   }
   }
 
 
+  protected String _getProtocVersion() {
+    return info.getProperty("protocVersion", "Unknown");
+  }
+
   private static VersionInfo COMMON_VERSION_INFO = new VersionInfo("common");
   /**
    * Get the Hadoop version.
@@ -153,12 +157,20 @@ public class VersionInfo {
   public static String getBuildVersion(){
     return COMMON_VERSION_INFO._getBuildVersion();
   }
-  
+
+  /**
+   * Returns the protoc version used for the build.
+   */
+  public static String getProtocVersion(){
+    return COMMON_VERSION_INFO._getProtocVersion();
+  }
+
   public static void main(String[] args) {
     LOG.debug("version: "+ getVersion());
     System.out.println("Hadoop " + getVersion());
     System.out.println("Subversion " + getUrl() + " -r " + getRevision());
     System.out.println("Compiled by " + getUser() + " on " + getDate());
+    System.out.println("Compiled with protoc " + getProtocVersion());
     System.out.println("From source with checksum " + getSrcChecksum());
     System.out.println("This command was run using " + 
         ClassUtil.findContainingJar(VersionInfo.class));
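Besides the extra line printed by main() above, the protoc version is also exposed programmatically. A small sketch of reading it; the class name is illustrative only.

  import org.apache.hadoop.util.VersionInfo;

  public class BuildInfoSketch {
    public static void main(String[] args) {
      System.out.println("Hadoop " + VersionInfo.getVersion());
      // New in this patch: the protoc version the build was compiled with,
      // or "Unknown" when the protocVersion property is missing.
      System.out.println("Compiled with protoc " + VersionInfo.getProtocVersion());
    }
  }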

+ 26 - 2
hadoop-common-project/hadoop-common/src/main/native/native.vcxproj

@@ -17,7 +17,7 @@
    limitations under the License.
 -->
 
-<Project DefaultTargets="Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+<Project DefaultTargets="CheckRequireSnappy;Build" ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
   <ItemGroup Label="ProjectConfigurations">
     <ProjectConfiguration Include="Release|x64">
       <Configuration>Release</Configuration>
@@ -49,6 +49,21 @@
     <IntDir>..\..\..\target\native\$(Configuration)\</IntDir>
     <TargetName>hadoop</TargetName>
   </PropertyGroup>
+  <PropertyGroup>
+    <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\snappy.dll')">$(CustomSnappyPrefix)</SnappyLib>
+    <SnappyLib Condition="Exists('$(CustomSnappyPrefix)\lib\snappy.dll') And '$(SnappyLib)' == ''">$(CustomSnappyPrefix)\lib</SnappyLib>
+    <SnappyLib Condition="Exists('$(CustomSnappyLib)') And '$(SnappyLib)' == ''">$(CustomSnappyLib)</SnappyLib>
+    <SnappyInclude Condition="Exists('$(CustomSnappyPrefix)\snappy.h')">$(CustomSnappyPrefix)</SnappyInclude>
+    <SnappyInclude Condition="Exists('$(CustomSnappyPrefix)\include\snappy.h') And '$(SnappyInclude)' == ''">$(CustomSnappyPrefix)\include</SnappyInclude>
+    <SnappyInclude Condition="Exists('$(CustomSnappyInclude)') And '$(SnappyInclude)' == ''">$(CustomSnappyInclude)</SnappyInclude>
+    <SnappyEnabled Condition="'$(SnappyLib)' != '' And '$(SnappyInclude)' != ''">true</SnappyEnabled>
+    <IncludePath Condition="'$(SnappyEnabled)' == 'true'">$(SnappyInclude);$(IncludePath)</IncludePath>
+  </PropertyGroup>
+  <Target Name="CheckRequireSnappy">
+    <Error
+      Text="Required snappy library could not be found.  SnappyLibrary=$(SnappyLibrary), SnappyInclude=$(SnappyInclude), CustomSnappyLib=$(CustomSnappyLib), CustomSnappyInclude=$(CustomSnappyInclude), CustomSnappyPrefix=$(CustomSnappyPrefix)"
+      Condition="'$(RequireSnappy)' == 'true' And '$(SnappyEnabled)' != 'true'" />
+  </Target>
   <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
     <ClCompile>
       <WarningLevel>Level3</WarningLevel>
@@ -71,6 +86,12 @@
     </Link>
   </ItemDefinitionGroup>
   <ItemGroup>
+    <ClCompile Include="src\org\apache\hadoop\io\compress\snappy\SnappyCompressor.c" Condition="'$(SnappyEnabled)' == 'true'">
+      <AdditionalOptions>/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
+    </ClCompile>
+    <ClCompile Include="src\org\apache\hadoop\io\compress\snappy\SnappyDecompressor.c" Condition="'$(SnappyEnabled)' == 'true'">
+      <AdditionalOptions>/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
+    </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4hc.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\lz4hc.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\Lz4Compressor.c" />
     <ClCompile Include="src\org\apache\hadoop\io\compress\lz4\Lz4Compressor.c" />
@@ -79,12 +100,15 @@
     <ClCompile Include="src\org\apache\hadoop\io\nativeio\NativeIO.c" />
     <ClCompile Include="src\org\apache\hadoop\io\nativeio\NativeIO.c" />
     <ClCompile Include="src\org\apache\hadoop\security\JniBasedUnixGroupsMappingWin.c" />
     <ClCompile Include="src\org\apache\hadoop\security\JniBasedUnixGroupsMappingWin.c" />
     <ClCompile Include="src\org\apache\hadoop\util\bulk_crc32.c" />
     <ClCompile Include="src\org\apache\hadoop\util\bulk_crc32.c" />
-    <ClCompile Include="src\org\apache\hadoop\util\NativeCodeLoader.c" />
+    <ClCompile Include="src\org\apache\hadoop\util\NativeCodeLoader.c">
+      <AdditionalOptions Condition="'$(SnappyEnabled)' == 'true'">/D HADOOP_SNAPPY_LIBRARY=L\"snappy.dll\"</AdditionalOptions>
+    </ClCompile>
     <ClCompile Include="src\org\apache\hadoop\util\NativeCrc32.c" />
     <ClCompile Include="src\org\apache\hadoop\util\NativeCrc32.c" />
   </ItemGroup>
   </ItemGroup>
   <ItemGroup>
   <ItemGroup>
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32c_tables.h" />
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32c_tables.h" />
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32_zlib_polynomial_tables.h" />
     <ClInclude Include="..\src\org\apache\hadoop\util\crc32_zlib_polynomial_tables.h" />
+    <ClInclude Include="src\org\apache\hadoop\io\compress\snappy\org_apache_hadoop_io_compress_snappy.h" />
     <ClInclude Include="src\org\apache\hadoop\io\nativeio\file_descriptor.h" />
     <ClInclude Include="src\org\apache\hadoop\io\nativeio\file_descriptor.h" />
     <ClInclude Include="src\org\apache\hadoop\util\bulk_crc32.h" />
     <ClInclude Include="src\org\apache\hadoop\util\bulk_crc32.h" />
     <ClInclude Include="src\org\apache\hadoop\util\crc32c_tables.h" />
     <ClInclude Include="src\org\apache\hadoop\util\crc32c_tables.h" />

+ 46 - 7
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyCompressor.c

@@ -30,6 +30,10 @@
 #include "config.h"
 #include "config.h"
 #endif // UNIX
 #endif // UNIX
 
 
+#ifdef WINDOWS
+#include "winutils.h"
+#endif
+
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 #include "org_apache_hadoop_io_compress_snappy_SnappyCompressor.h"
 
 
 #define JINT_MAX 0x7fffffff
 #define JINT_MAX 0x7fffffff
@@ -40,11 +44,18 @@ static jfieldID SnappyCompressor_uncompressedDirectBufLen;
 static jfieldID SnappyCompressor_compressedDirectBuf;
 static jfieldID SnappyCompressor_directBufferSize;
 
+#ifdef UNIX
 static snappy_status (*dlsym_snappy_compress)(const char*, size_t, char*, size_t*);
+#endif
+
+#ifdef WINDOWS
+typedef snappy_status (__cdecl *__dlsym_snappy_compress)(const char*, size_t, char*, size_t*);
+static __dlsym_snappy_compress dlsym_snappy_compress;
+#endif
 
 JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_initIDs
 (JNIEnv *env, jclass clazz){
-
+#ifdef UNIX
   // Load libsnappy.so
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
@@ -53,10 +64,25 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
   }
+#endif
+
+#ifdef WINDOWS
+  HMODULE libsnappy = LoadLibrary(HADOOP_SNAPPY_LIBRARY);
+  if (!libsnappy) {
+    THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load snappy.dll");
+    return;
+  }
+#endif
 
   // Locate the requisite symbols from libsnappy.so
+#ifdef UNIX
   dlerror();                                 // Clear any existing error
   LOAD_DYNAMIC_SYMBOL(dlsym_snappy_compress, env, libsnappy, "snappy_compress");
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_compress, dlsym_snappy_compress, env, libsnappy, "snappy_compress");
+#endif
 
   SnappyCompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
                                                  "Ljava/lang/Class;");
@@ -74,6 +100,9 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
 
 JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_compressBytesDirect
 (JNIEnv *env, jobject thisj){
+  const char* uncompressed_bytes;
+  char* compressed_bytes;
+  snappy_status ret;
   // Get members of SnappyCompressor
   jobject clazz = (*env)->GetStaticObjectField(env, thisj, SnappyCompressor_clazz);
   jobject uncompressed_direct_buf = (*env)->GetObjectField(env, thisj, SnappyCompressor_uncompressedDirectBuf);
@@ -84,7 +113,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
-  const char* uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  uncompressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (uncompressed_bytes == 0) {
@@ -93,7 +122,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
 
   // Get the output direct buffer
   LOCK_CLASS(env, clazz, "SnappyCompressor");
-  char* compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  compressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyCompressor");
 
   if (compressed_bytes == 0) {
@@ -102,8 +131,8 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyCompresso
 
   /* size_t should always be 4 bytes or larger. */
   buf_len = (size_t)compressed_direct_buf_len;
-  snappy_status ret = dlsym_snappy_compress(uncompressed_bytes,
-        uncompressed_direct_buf_len, compressed_bytes, &buf_len);
+  ret = dlsym_snappy_compress(uncompressed_bytes, uncompressed_direct_buf_len,
+        compressed_bytes, &buf_len);
   if (ret != SNAPPY_OK){
     THROW(env, "java/lang/InternalError", "Could not compress data. Buffer length is too small.");
     return 0;
@@ -128,8 +157,18 @@ Java_org_apache_hadoop_io_compress_snappy_SnappyCompressor_getLibraryName(JNIEnv
       return (*env)->NewStringUTF(env, dl_info.dli_fname);
     }
   }
-#endif
+
   return (*env)->NewStringUTF(env, HADOOP_SNAPPY_LIBRARY);
-}
+#endif
 
+#ifdef WINDOWS
+  LPWSTR filename = NULL;
+  GetLibraryName(dlsym_snappy_compress, &filename);
+  if (filename != NULL) {
+    return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+  } else {
+    return (*env)->NewStringUTF(env, "Unavailable");
+  }
+#endif
+}
 #endif //define HADOOP_SNAPPY_LIBRARY

+ 31 - 3
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/SnappyDecompressor.c

@@ -37,12 +37,20 @@ static jfieldID SnappyDecompressor_compressedDirectBufLen;
 static jfieldID SnappyDecompressor_uncompressedDirectBuf;
 static jfieldID SnappyDecompressor_directBufferSize;
 
+#ifdef UNIX
 static snappy_status (*dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*);
+#endif
+
+#ifdef WINDOWS
+typedef snappy_status (__cdecl *__dlsym_snappy_uncompress)(const char*, size_t, char*, size_t*);
+static __dlsym_snappy_uncompress dlsym_snappy_uncompress;
+#endif
 
 JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_initIDs
 (JNIEnv *env, jclass clazz){
 
   // Load libsnappy.so
+#ifdef UNIX
   void *libsnappy = dlopen(HADOOP_SNAPPY_LIBRARY, RTLD_LAZY | RTLD_GLOBAL);
   if (!libsnappy) {
     char* msg = (char*)malloc(1000);
@@ -50,11 +58,27 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     THROW(env, "java/lang/UnsatisfiedLinkError", msg);
     return;
     return;
   }
   }
+#endif
+
+#ifdef WINDOWS
+  HMODULE libsnappy = LoadLibrary(HADOOP_SNAPPY_LIBRARY);
+  if (!libsnappy) {
+    THROW(env, "java/lang/UnsatisfiedLinkError", "Cannot load snappy.dll");
+    return;
+  }
+#endif
 
   // Locate the requisite symbols from libsnappy.so
+#ifdef UNIX
   dlerror();                                 // Clear any existing error
   LOAD_DYNAMIC_SYMBOL(dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress");
 
+#endif
+
+#ifdef WINDOWS
+  LOAD_DYNAMIC_SYMBOL(__dlsym_snappy_uncompress, dlsym_snappy_uncompress, env, libsnappy, "snappy_uncompress");
+#endif
+
   SnappyDecompressor_clazz = (*env)->GetStaticFieldID(env, clazz, "clazz",
                                                    "Ljava/lang/Class;");
   SnappyDecompressor_compressedDirectBuf = (*env)->GetFieldID(env,clazz,
@@ -71,6 +95,9 @@ JNIEXPORT void JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
 
 JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompressor_decompressBytesDirect
 (JNIEnv *env, jobject thisj){
+  const char* compressed_bytes = NULL;
+  char* uncompressed_bytes = NULL;
+  snappy_status ret;
   // Get members of SnappyDecompressor
   jobject clazz = (*env)->GetStaticObjectField(env,thisj, SnappyDecompressor_clazz);
   jobject compressed_direct_buf = (*env)->GetObjectField(env,thisj, SnappyDecompressor_compressedDirectBuf);
@@ -80,7 +107,7 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
 
   // Get the input direct buffer
   LOCK_CLASS(env, clazz, "SnappyDecompressor");
-  const char* compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
+  compressed_bytes = (const char*)(*env)->GetDirectBufferAddress(env, compressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
 
   if (compressed_bytes == 0) {
@@ -89,14 +116,15 @@ JNIEXPORT jint JNICALL Java_org_apache_hadoop_io_compress_snappy_SnappyDecompres
 
   // Get the output direct buffer
   LOCK_CLASS(env, clazz, "SnappyDecompressor");
-  char* uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
+  uncompressed_bytes = (char *)(*env)->GetDirectBufferAddress(env, uncompressed_direct_buf);
   UNLOCK_CLASS(env, clazz, "SnappyDecompressor");
 
   if (uncompressed_bytes == 0) {
     return (jint)0;
   }
 
-  snappy_status ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len, uncompressed_bytes, &uncompressed_direct_buf_len);
+  ret = dlsym_snappy_uncompress(compressed_bytes, compressed_direct_buf_len,
+        uncompressed_bytes, &uncompressed_direct_buf_len);
   if (ret == SNAPPY_BUFFER_TOO_SMALL){
     THROW(env, "java/lang/InternalError", "Could not decompress data. Buffer length is too small.");
   } else if (ret == SNAPPY_INVALID_INPUT){

+ 4 - 0
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/io/compress/snappy/org_apache_hadoop_io_compress_snappy.h

@@ -21,7 +21,11 @@
 #define ORG_APACHE_HADOOP_IO_COMPRESS_SNAPPY_SNAPPY_H
 
 #include "org_apache_hadoop.h"
+
+#ifdef UNIX
 #include <dlfcn.h>
+#endif
+
 #include <jni.h>
 #include <snappy-c.h>
 #include <stddef.h>

+ 14 - 26
hadoop-common-project/hadoop-common/src/main/native/src/org/apache/hadoop/util/NativeCodeLoader.c

@@ -23,6 +23,10 @@
 #include "config.h"
 #include "config.h"
 #endif // UNIX
 #endif // UNIX
 
 
+#ifdef WINDOWS
+#include "winutils.h"
+#endif
+
 #include <jni.h>
 #include <jni.h>
 
 
 JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsSnappy
 JNIEXPORT jboolean JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_buildSupportsSnappy
@@ -47,32 +51,16 @@ JNIEXPORT jstring JNICALL Java_org_apache_hadoop_util_NativeCodeLoader_getLibrar
 #endif
 
 #ifdef WINDOWS
-  SIZE_T ret = 0;
-  DWORD size = MAX_PATH;
   LPWSTR filename = NULL;
-  HMODULE mod = NULL;
-  DWORD err = ERROR_SUCCESS;
-
-  MEMORY_BASIC_INFORMATION mbi;
-  ret = VirtualQuery(Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
-    &mbi, sizeof(mbi));
-  if (ret == 0) goto cleanup;
-  mod = mbi.AllocationBase;
-
-  do {
-    filename = (LPWSTR) realloc(filename, size * sizeof(WCHAR));
-    if (filename == NULL) goto cleanup;
-    GetModuleFileName(mod, filename, size);
-    size <<= 1;
-    err = GetLastError();
-  } while (err == ERROR_INSUFFICIENT_BUFFER);
-  
-  if (err != ERROR_SUCCESS) goto cleanup;
-
-  return (*env)->NewString(env, filename, (jsize) wcslen(filename));
-
-cleanup:
-  if (filename != NULL) free(filename);
-  return (*env)->NewStringUTF(env, "Unavailable");
+  GetLibraryName(Java_org_apache_hadoop_util_NativeCodeLoader_getLibraryName,
+    &filename);
+  if (filename != NULL)
+  {
+    return (*env)->NewString(env, filename, (jsize) wcslen(filename));
+  }
+  else
+  {
+    return (*env)->NewStringUTF(env, "Unavailable");
+  }
 #endif
 }
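The Windows branch of getLibraryName() above now delegates to the GetLibraryName() helper added to libwinutils further down. The Java entry point is unchanged; a hedged sketch of how it is typically queried, guarding on the loader so the native calls are never made without libhadoop; the class name is illustrative.

  import org.apache.hadoop.util.NativeCodeLoader;

  public class NativeLibSketch {
    public static void main(String[] args) {
      if (NativeCodeLoader.isNativeCodeLoaded()) {
        // Backed by the JNI function shown above; on Windows it now returns
        // "Unavailable" only when the module path cannot be resolved.
        System.out.println("native library: " + NativeCodeLoader.getLibraryName());
        System.out.println("snappy support: " + NativeCodeLoader.buildSupportsSnappy());
      } else {
        System.out.println("libhadoop was not loaded");
      }
    }
  }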

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/resources/common-version-info.properties

@@ -23,3 +23,4 @@ user=${user.name}
 date=${version-info.build.time}
 url=${version-info.scm.uri}
 srcChecksum=${version-info.source.md5}
+protocVersion=${protobuf.version}

+ 3 - 1
hadoop-common-project/hadoop-common/src/main/winutils/include/winutils.h

@@ -153,4 +153,6 @@ DWORD ChangeFileModeByMask(__in LPCWSTR path, INT mode);
 DWORD GetLocalGroupsForUser(__in LPCWSTR user,
   __out LPLOCALGROUP_USERS_INFO_0 *groups, __out LPDWORD entries);
 
-BOOL EnablePrivilege(__in LPCWSTR privilegeName);
+BOOL EnablePrivilege(__in LPCWSTR privilegeName);
+
+void GetLibraryName(__in LPCVOID lpAddress, __out LPWSTR *filename);

+ 48 - 0
hadoop-common-project/hadoop-common/src/main/winutils/libwinutils.c

@@ -1709,3 +1709,51 @@ void ReportErrorCode(LPCWSTR func, DWORD err)
   }
   if (msg != NULL) LocalFree(msg);
 }
+
+//----------------------------------------------------------------------------
+// Function: GetLibraryName
+//
+// Description:
+//  Given an address, get the file name of the library from which it was loaded.
+//
+// Returns:
+//  None
+//
+// Notes:
+// - The function allocates heap memory and points the filename out parameter to
+//   the newly allocated memory, which will contain the name of the file.
+//
+// - If there is any failure, then the function frees the heap memory it
+//   allocated and sets the filename out parameter to NULL.
+//
+void GetLibraryName(LPCVOID lpAddress, LPWSTR *filename)
+{
+  SIZE_T ret = 0;
+  DWORD size = MAX_PATH;
+  HMODULE mod = NULL;
+  DWORD err = ERROR_SUCCESS;
+
+  MEMORY_BASIC_INFORMATION mbi;
+  ret = VirtualQuery(lpAddress, &mbi, sizeof(mbi));
+  if (ret == 0) goto cleanup;
+  mod = mbi.AllocationBase;
+
+  do {
+    *filename = (LPWSTR) realloc(*filename, size * sizeof(WCHAR));
+    if (*filename == NULL) goto cleanup;
+    GetModuleFileName(mod, *filename, size);
+    size <<= 1;
+    err = GetLastError();
+  } while (err == ERROR_INSUFFICIENT_BUFFER);
+
+  if (err != ERROR_SUCCESS) goto cleanup;
+
+  return;
+
+cleanup:
+  if (*filename != NULL)
+  {
+    free(*filename);
+    *filename = NULL;
+  }
+}

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/apt/CLIMiniCluster.apt.vm

@@ -42,7 +42,7 @@ Hadoop MapReduce Next Generation - CLI MiniCluster.
 $ mvn clean install -DskipTests
 $ mvn package -Pdist -Dtar -DskipTests -Dmaven.javadoc.skip
 +---+
-  <<NOTE:>> You will need protoc installed of version 2.4.1 or greater.
+  <<NOTE:>> You will need protoc 2.5.0 installed.
 
   The tarball should be available in <<<hadoop-dist/target/>>> directory. 
 

+ 42 - 19
hadoop-common-project/hadoop-common/src/site/apt/FileSystemShell.apt.vm

@@ -86,11 +86,14 @@ chgrp
 
    Usage: <<<hdfs dfs -chgrp [-R] GROUP URI [URI ...]>>>
 
-   Change group association of files. With -R, make the change recursively
-   through the directory structure. The user must be the owner of files, or
+   Change group association of files. The user must be the owner of files, or
    else a super-user. Additional information is in the
    {{{betterurl}Permissions Guide}}.
 
+   Options
+
+     * The -R option will make the change recursively through the directory structure.
+
 chmod
 
    Usage: <<<hdfs dfs -chmod [-R] <MODE[,MODE]... | OCTALMODE> URI [URI ...]>>>
@@ -100,14 +103,21 @@ chmod
    else a super-user. Additional information is in the
    {{{betterurl}Permissions Guide}}.
 
+   Options
+
+     * The -R option will make the change recursively through the directory structure.
+
 chown
 
    Usage: <<<hdfs dfs -chown [-R] [OWNER][:[GROUP]] URI [URI ]>>>
 
-   Change the owner of files. With -R, make the change recursively through the
-   directory structure. The user must be a super-user. Additional information
+   Change the owner of files. The user must be a super-user. Additional information
    is in the {{{betterurl}Permissions Guide}}.
 
+   Options
+
+     * The -R option will make the change recursively through the directory structure.
+
 copyFromLocal
 
    Usage: <<<hdfs dfs -copyFromLocal <localsrc> URI>>>
@@ -115,6 +125,10 @@ copyFromLocal
    Similar to put command, except that the source is restricted to a local
    file reference.
 
+   Options:
+
+     * The -f option will overwrite the destination if it already exists.
+
 copyToLocal
 
    Usage: <<<hdfs dfs -copyToLocal [-ignorecrc] [-crc] URI <localdst> >>>
@@ -145,11 +159,15 @@ count
 
 cp
 
-   Usage: <<<hdfs dfs -cp URI [URI ...] <dest> >>>
+   Usage: <<<hdfs dfs -cp [-f] URI [URI ...] <dest> >>>
 
    Copy files from source to destination. This command allows multiple sources
    as well in which case the destination must be a directory.
 
+    Options:
+
+      * The -f option will overwrite the destination if it already exists.
+
    Example:
 
      * <<<hdfs dfs -cp /user/hadoop/file1 /user/hadoop/file2>>>
@@ -232,7 +250,7 @@ ls
 permissions number_of_replicas userid groupid filesize modification_date modification_time filename
 +---+
 
-   For a directory it returns list of its direct children as in unix.A directory is listed as:
+   For a directory it returns list of its direct children as in Unix. A directory is listed as:
 
 +---+
 permissions userid groupid modification_date modification_time dirname
@@ -256,8 +274,11 @@ mkdir
 
    Usage: <<<hdfs dfs -mkdir [-p] <paths> >>>
 
-   Takes path uri's as argument and creates directories.  With -p the behavior
-   is much like unix mkdir -p creating parent directories along the path.
+   Takes path uri's as argument and creates directories.
+
+   Options:
+
+     * The -p option behavior is much like Unix mkdir -p, creating parent directories along the path.
 
    Example:
 
@@ -362,8 +383,11 @@ setrep
 
    Usage: <<<hdfs dfs -setrep [-R] <path> >>>
 
-   Changes the replication factor of a file. -R option is for recursively
-   increasing the replication factor of files within a directory.
+   Changes the replication factor of a file.
+
+   Options:
+
+     * The -R option will recursively increase the replication factor of files within a directory.
 
    Example:
 
@@ -390,8 +414,11 @@ tail
 
    Usage: <<<hdfs dfs -tail [-f] URI>>>
 
-   Displays last kilobyte of the file to stdout. -f option can be used as in
-   Unix.
+   Displays last kilobyte of the file to stdout.
+
+   Options:
+
+     * The -f option will output appended data as the file grows, as in Unix.
 
    Example:
 
@@ -406,13 +433,9 @@ test
 
    Options:
 
-*----+------------+
-| -e | check to see if the file exists. Return 0 if true.
-*----+------------+
-| -z | check to see if the file is zero length. Return 0 if true.
-*----+------------+
-| -d | check to see if the path is directory. Return 0 if true.
-*----+------------+
+     * The -e option will check to see if the file exists, returning 0 if true.
+     * The -z option will check to see if the file is zero length, returning 0 if true.
+     * The -d option will check to see if the path is directory, returning 0 if true.
 
    Example:
 

+ 1 - 1
hadoop-common-project/hadoop-common/src/site/apt/SingleCluster.apt.vm

@@ -32,7 +32,7 @@ $ mvn clean install -DskipTests
 $ cd hadoop-mapreduce-project
 $ mvn clean install assembly:assembly -Pnative
 +---+
-  <<NOTE:>> You will need protoc installed of version 2.4.1 or greater.
+  <<NOTE:>> You will need protoc 2.5.0 installed.
 
   To ignore the native builds in mapreduce you can omit the <<<-Pnative>>> argument
   for maven. The tarball should be available in <<<target/>>> directory. 

+ 44 - 4
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestHarFileSystemBasics.java

@@ -82,7 +82,7 @@ public class TestHarFileSystemBasics {
     localFileSystem.createNewFile(masterIndexPath);
     assertTrue(localFileSystem.exists(masterIndexPath));
 
-    writeVersionToMasterIndexImpl(HarFileSystem.VERSION);
+    writeVersionToMasterIndexImpl(HarFileSystem.VERSION, masterIndexPath);
 
     final HarFileSystem harFileSystem = new HarFileSystem(localFileSystem);
     final URI uri = new URI("har://" + harPath.toString());
@@ -90,8 +90,25 @@ public class TestHarFileSystemBasics {
     return harFileSystem;
   }
 
-  private void writeVersionToMasterIndexImpl(int version) throws IOException {
-    final Path masterIndexPath = new Path(harPath, "_masterindex");
+  private HarFileSystem createHarFileSystem(final Configuration conf, Path aHarPath)
+      throws Exception {
+    localFileSystem.mkdirs(aHarPath);
+    final Path indexPath = new Path(aHarPath, "_index");
+    final Path masterIndexPath = new Path(aHarPath, "_masterindex");
+    localFileSystem.createNewFile(indexPath);
+    assertTrue(localFileSystem.exists(indexPath));
+    localFileSystem.createNewFile(masterIndexPath);
+    assertTrue(localFileSystem.exists(masterIndexPath));
+
+    writeVersionToMasterIndexImpl(HarFileSystem.VERSION, masterIndexPath);
+
+    final HarFileSystem harFileSystem = new HarFileSystem(localFileSystem);
+    final URI uri = new URI("har://" + aHarPath.toString());
+    harFileSystem.initialize(uri, conf);
+    return harFileSystem;
+  }
+
+  private void writeVersionToMasterIndexImpl(int version, Path masterIndexPath) throws IOException {
     // write Har version into the master index:
     final FSDataOutputStream fsdos = localFileSystem.create(masterIndexPath);
     try {
@@ -172,6 +189,29 @@ public class TestHarFileSystemBasics {
     assertTrue(hfs.getMetadata() == harFileSystem.getMetadata());
   }
 
+  @Test
+  public void testPositiveLruMetadataCacheFs() throws Exception {
+    // Init 2nd har file system on the same underlying FS, so the
+    // metadata gets reused:
+    HarFileSystem hfs = new HarFileSystem(localFileSystem);
+    URI uri = new URI("har://" + harPath.toString());
+    hfs.initialize(uri, new Configuration());
+    // the metadata should be reused from cache:
+    assertTrue(hfs.getMetadata() == harFileSystem.getMetadata());
+
+    // Create more hars, until the cache is full + 1; the last creation should evict the first entry from the cache
+    for (int i = 0; i <= hfs.METADATA_CACHE_ENTRIES_DEFAULT; i++) {
+      Path p = new Path(rootPath, "path1/path2/my" + i +".har");
+      createHarFileSystem(conf, p);
+    }
+
+    // The first entry should not be in the cache anymore:
+    hfs = new HarFileSystem(localFileSystem);
+    uri = new URI("har://" + harPath.toString());
+    hfs.initialize(uri, new Configuration());
+    assertTrue(hfs.getMetadata() != harFileSystem.getMetadata());
+  }
+
   @Test
   public void testPositiveInitWithoutUnderlyingFS() throws Exception {
     // Init HarFS with no constructor arg, so that the underlying FS object
@@ -218,7 +258,7 @@ public class TestHarFileSystemBasics {
     // time with 1 second accuracy:
     Thread.sleep(1000);
     // write an unsupported version:
-    writeVersionToMasterIndexImpl(7777);
+    writeVersionToMasterIndexImpl(7777, new Path(harPath, "_masterindex"));
     // init the Har:
     final HarFileSystem hfs = new HarFileSystem(localFileSystem);
 

+ 17 - 2
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestLocalFileSystem.java

@@ -26,6 +26,7 @@ import org.apache.hadoop.util.StringUtils;
 import static org.apache.hadoop.fs.FileSystemTestHelper.*;
 
 import java.io.*;
+import java.net.URI;
 import java.util.Arrays;
 import java.util.Random;
 
@@ -363,12 +364,12 @@
 
     FileStatus status = fileSys.getFileStatus(path);
     assertTrue("check we're actually changing something", newModTime != status.getModificationTime());
-    assertEquals(0, status.getAccessTime());
+    long accessTime = status.getAccessTime();
 
     fileSys.setTimes(path, newModTime, -1);
     status = fileSys.getFileStatus(path);
     assertEquals(newModTime, status.getModificationTime());
-    assertEquals(0, status.getAccessTime());
+    assertEquals(accessTime, status.getAccessTime());
   }
 
   /**
@@ -520,4 +521,18 @@ public class TestLocalFileSystem {
       fail(s);
     }
   }
+
+  @Test
+  public void testStripFragmentFromPath() throws Exception {
+    FileSystem fs = FileSystem.getLocal(new Configuration());
+    Path pathQualified = TEST_PATH.makeQualified(fs.getUri(),
+        fs.getWorkingDirectory());
+    Path pathWithFragment = new Path(
+        new URI(pathQualified.toString() + "#glacier"));
+    // Create test file with fragment
+    FileSystemTestHelper.createFile(fs, pathWithFragment);
+    Path resolved = fs.resolvePath(pathWithFragment);
+    assertEquals("resolvePath did not strip fragment from Path", pathQualified,
+        resolved);
+  }
 }

+ 122 - 0
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestStat.java

@@ -0,0 +1,122 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.fs;
+
+import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;
+
+import java.io.BufferedReader;
+import java.io.FileNotFoundException;
+import java.io.StringReader;
+
+import org.apache.hadoop.conf.Configuration;
+import org.junit.BeforeClass;
+import org.junit.Test;
+
+public class TestStat {
+
+  private static Stat stat;
+
+  @BeforeClass
+  public static void setup() throws Exception {
+    stat = new Stat(new Path("/dummypath"),
+        4096l, false, FileSystem.get(new Configuration()));
+  }
+
+  private class StatOutput {
+    final String doesNotExist;
+    final String directory;
+    final String file;
+    final String symlink;
+    final String stickydir;
+
+    StatOutput(String doesNotExist, String directory, String file,
+        String symlink, String stickydir) {
+      this.doesNotExist = doesNotExist;
+      this.directory = directory;
+      this.file = file;
+      this.symlink = symlink;
+      this.stickydir = stickydir;
+    }
+
+    void test() throws Exception {
+      BufferedReader br;
+      FileStatus status;
+
+      try {
+        br = new BufferedReader(new StringReader(doesNotExist));
+        stat.parseExecResult(br);
+      } catch (FileNotFoundException e) {
+        // expected
+      }
+
+      br = new BufferedReader(new StringReader(directory));
+      stat.parseExecResult(br);
+      status = stat.getFileStatusForTesting();
+      assertTrue(status.isDirectory());
+
+      br = new BufferedReader(new StringReader(file));
+      stat.parseExecResult(br);
+      status = stat.getFileStatusForTesting();
+      assertTrue(status.isFile());
+
+      br = new BufferedReader(new StringReader(symlink));
+      stat.parseExecResult(br);
+      status = stat.getFileStatusForTesting();
+      assertTrue(status.isSymlink());
+
+      br = new BufferedReader(new StringReader(stickydir));
+      stat.parseExecResult(br);
+      status = stat.getFileStatusForTesting();
+      assertTrue(status.isDirectory());
+      assertTrue(status.getPermission().getStickyBit());
+    }
+  }
+
+  @Test(timeout=10000)
+  public void testStatLinux() throws Exception {
+    StatOutput linux = new StatOutput(
+        "stat: cannot stat `watermelon': No such file or directory",
+        "4096,directory,1373584236,1373586485,755,andrew,root,`.'",
+        "0,regular empty file,1373584228,1373584228,644,andrew,andrew,`target'",
+        "6,symbolic link,1373584236,1373584236,777,andrew,andrew,`link' -> `target'",
+        "4096,directory,1374622334,1375124212,1755,andrew,andrew,`stickydir'");
+    linux.test();
+  }
+
+  @Test(timeout=10000)
+  public void testStatFreeBSD() throws Exception {
+    StatOutput freebsd = new StatOutput(
+        "stat: symtest/link: stat: No such file or directory",
+        "512,Directory,1373583695,1373583669,40755,awang,awang,`link' -> `'",
+        "0,Regular File,1373508937,1373508937,100644,awang,awang,`link' -> `'",
+        "6,Symbolic Link,1373508941,1373508941,120755,awang,awang,`link' -> `target'",
+        "512,Directory,1375139537,1375139537,41755,awang,awang,`link' -> `'");
+    freebsd.test();
+  }
+
+  @Test(timeout=10000)
+  public void testStatFileNotFound() throws Exception {
+    try {
+      stat.getFileStatus();
+      fail("Expected FileNotFoundException");
+    } catch (FileNotFoundException e) {
+      // expected
+    }
+  }
+}
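
For context, a minimal sketch of driving the new Stat class against a real path (illustration only, not part of this change). It mirrors the constructor arguments used in setup() above, is declared in org.apache.hadoop.fs because Stat may not be visible from other packages, and assumes a Unix-like system where the stat(1) command is available:

package org.apache.hadoop.fs;

import org.apache.hadoop.conf.Configuration;

public class StatSketch {
  public static void main(String[] args) throws Exception {
    FileSystem fs = FileSystem.getLocal(new Configuration());
    // 4096L and false mirror the arguments passed in the test's setup()
    Stat stat = new Stat(new Path("/tmp"), 4096L, false, fs);
    FileStatus status = stat.getFileStatus();  // shells out to the platform's stat command
    System.out.println(status.isDirectory() + " " + status.getPermission());
  }
}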

+ 7 - 8
hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/TestSymlinkLocalFS.java

@@ -31,6 +31,7 @@ import java.net.URISyntaxException;
 
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.util.Shell;
+import org.apache.hadoop.security.UserGroupInformation;
 import org.junit.Test;
 
 /**
@@ -134,6 +135,7 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
     Path fileAbs  = new Path(testBaseDir1()+"/file");
     Path fileQual = new Path(testURI().toString(), fileAbs);
     Path link     = new Path(testBaseDir1()+"/linkToFile");
+    Path linkQual = new Path(testURI().toString(), link.toString());
     wrapper.createSymlink(fileAbs, link, false);
     // Deleting the link using FileContext currently fails because
     // resolve looks up LocalFs rather than RawLocalFs for the path
@@ -151,18 +153,15 @@ abstract public class TestSymlinkLocalFS extends SymlinkBaseTest {
       // Expected. File's exists method returns false for dangling links
     }
     // We can stat a dangling link
+    UserGroupInformation user = UserGroupInformation.getCurrentUser();
     FileStatus fsd = wrapper.getFileLinkStatus(link);
     assertEquals(fileQual, fsd.getSymlink());
     assertTrue(fsd.isSymlink());
     assertFalse(fsd.isDirectory());
-    assertEquals("", fsd.getOwner());
-    assertEquals("", fsd.getGroup());
-    assertEquals(link, fsd.getPath());
-    assertEquals(0, fsd.getLen());
-    assertEquals(0, fsd.getBlockSize());
-    assertEquals(0, fsd.getReplication());
-    assertEquals(0, fsd.getAccessTime());
-    assertEquals(FsPermission.getDefault(), fsd.getPermission());
+    assertEquals(user.getUserName(), fsd.getOwner());
+    // Compare against user's primary group
+    assertEquals(user.getGroupNames()[0], fsd.getGroup());
+    assertEquals(linkQual, fsd.getPath());
     // Accessing the link
     try {
       readFile(link);

+ 10 - 2
hadoop-common-project/hadoop-common/src/test/resources/testConf.xml

@@ -296,7 +296,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*modification times, ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*modification times, ownership and the mode. Passing -f( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*overwrites the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>
@@ -400,7 +404,11 @@
         </comparator>
         <comparator>
           <type>RegexpComparator</type>
-          <expected-output>^( |\t)*ownership and the mode.( )*</expected-output>
+          <expected-output>^( |\t)*ownership and the mode. Passing -f overwrites( )*</expected-output>
+        </comparator>
+        <comparator>
+          <type>RegexpComparator</type>
+          <expected-output>^( |\t)*the destination if it already exists.( )*</expected-output>
         </comparator>
       </comparators>
     </test>

+ 55 - 0
hadoop-common-project/hadoop-minikdc/pom.xml

@@ -0,0 +1,55 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+<project xmlns="http://maven.apache.org/POM/4.0.0"
+         xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+         xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xsd/maven-4.0.0.xsd">
+  <parent>
+    <groupId>org.apache.hadoop</groupId>
+    <artifactId>hadoop-project</artifactId>
+    <version>3.0.0-SNAPSHOT</version>
+    <relativePath>../../hadoop-project</relativePath>
+  </parent>
+  <modelVersion>4.0.0</modelVersion>
+  <groupId>org.apache.hadoop</groupId>
+  <artifactId>hadoop-minikdc</artifactId>
+  <version>3.0.0-SNAPSHOT</version>
+  <description>Apache Hadoop MiniKDC</description>
+  <name>Apache Hadoop MiniKDC</name>
+  <packaging>jar</packaging>
+
+  <dependencies>
+    <dependency>
+      <groupId>commons-io</groupId>
+      <artifactId>commons-io</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.apache.directory.server</groupId>
+      <artifactId>apacheds-all</artifactId>
+      <version>2.0.0-M14</version>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>org.slf4j</groupId>
+      <artifactId>slf4j-log4j12</artifactId>
+      <scope>compile</scope>
+    </dependency>
+    <dependency>
+      <groupId>junit</groupId>
+      <artifactId>junit</artifactId>
+      <scope>compile</scope>
+    </dependency>
+  </dependencies>
+</project>

+ 42 - 0
hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytab.java

@@ -0,0 +1,42 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.directory.server.kerberos.shared.keytab;
+
+import java.io.File;
+import java.io.IOException;
+import java.nio.ByteBuffer;
+
+//This is a hack for ApacheDS 2.0.0-M14 to be able to create
+//keytab files with more than one principal.
+//It needs to be in this package because the KeytabEncoder class is
+//package-private.
+//This class can be removed once JIRA DIRSERVER-1882
+//(https://issues.apache.org/jira/browse/DIRSERVER-1882) is resolved.
+public class HackedKeytab extends Keytab {
+
+  private byte[] keytabVersion = VERSION_52;
+
+  public void write( File file, int principalCount ) throws IOException
+  {
+    HackedKeytabEncoder writer = new HackedKeytabEncoder();
+    ByteBuffer buffer = writer.write( keytabVersion, getEntries(),
+            principalCount );
+    writeFile( buffer, file );
+  }
+
+}

+ 121 - 0
hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/directory/server/kerberos/shared/keytab/HackedKeytabEncoder.java

@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.directory.server.kerberos.shared.keytab;
+
+import org.apache.directory.shared.kerberos.components.EncryptionKey;
+
+import java.nio.ByteBuffer;
+import java.util.Iterator;
+import java.util.List;
+
+//This is a hack for ApacheDS 2.0.0-M14 to be able to create
+//keytab files with more than one principal.
+//It needs to be in this package because the KeytabEncoder class is
+//package-private.
+//This class can be removed once JIRA DIRSERVER-1882
+//(https://issues.apache.org/jira/browse/DIRSERVER-1882) is resolved.
+class HackedKeytabEncoder extends KeytabEncoder {
+
+  ByteBuffer write( byte[] keytabVersion, List<KeytabEntry> entries,
+                    int principalCount )
+  {
+    ByteBuffer buffer = ByteBuffer.allocate( 512 * principalCount);
+    putKeytabVersion(buffer, keytabVersion);
+    putKeytabEntries( buffer, entries );
+    buffer.flip();
+    return buffer;
+  }
+
+  private void putKeytabVersion( ByteBuffer buffer, byte[] version )
+  {
+    buffer.put( version );
+  }
+
+  private void putKeytabEntries( ByteBuffer buffer, List<KeytabEntry> entries )
+  {
+    Iterator<KeytabEntry> iterator = entries.iterator();
+
+    while ( iterator.hasNext() )
+    {
+      ByteBuffer entryBuffer = putKeytabEntry( iterator.next() );
+      int size = entryBuffer.position();
+
+      entryBuffer.flip();
+
+      buffer.putInt( size );
+      buffer.put( entryBuffer );
+    }
+  }
+
+  private ByteBuffer putKeytabEntry( KeytabEntry entry )
+  {
+    ByteBuffer buffer = ByteBuffer.allocate( 100 );
+
+    putPrincipalName( buffer, entry.getPrincipalName() );
+
+    buffer.putInt( ( int ) entry.getPrincipalType() );
+
+    buffer.putInt( ( int ) ( entry.getTimeStamp().getTime() / 1000 ) );
+
+    buffer.put( entry.getKeyVersion() );
+
+    putKeyBlock( buffer, entry.getKey() );
+
+    return buffer;
+  }
+
+  private void putPrincipalName( ByteBuffer buffer, String principalName )
+  {
+    String[] split = principalName.split("@");
+    String nameComponent = split[0];
+    String realm = split[1];
+
+    String[] nameComponents = nameComponent.split( "/" );
+
+    // increment for v1
+    buffer.putShort( ( short ) nameComponents.length );
+
+    putCountedString( buffer, realm );
+    // write components
+
+    for ( int ii = 0; ii < nameComponents.length; ii++ )
+    {
+      putCountedString( buffer, nameComponents[ii] );
+    }
+  }
+
+  private void putKeyBlock( ByteBuffer buffer, EncryptionKey key )
+  {
+    buffer.putShort( ( short ) key.getKeyType().getValue() );
+    putCountedBytes( buffer, key.getKeyValue() );
+  }
+
+  private void putCountedString( ByteBuffer buffer, String string )
+  {
+    byte[] data = string.getBytes();
+    buffer.putShort( ( short ) data.length );
+    buffer.put( data );
+  }
+
+  private void putCountedBytes( ByteBuffer buffer, byte[] data )
+  {
+    buffer.putShort( ( short ) data.length );
+    buffer.put( data );
+  }
+
+}

+ 86 - 0
hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/KerberosSecurityTestcase.java

@@ -0,0 +1,86 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.minikdc;
+
+import org.junit.After;
+import org.junit.Before;
+
+import java.io.File;
+import java.util.Properties;
+
+/**
+ * KerberosSecurityTestcase provides a base class for using MiniKdc with other
+ * testcases. KerberosSecurityTestcase starts the MiniKdc (@Before) before
+ * running tests, and stops the MiniKdc (@After) after the testcases, using
+ * default settings (working dir and kdc configuration).
+ * <p>
+ * Users can directly inherit this class and implement their own test functions
+ * using the default settings, or override createTestDir() and
+ * createMiniKdcConf() to provide their own settings.
+ *
+ */
+
+public class KerberosSecurityTestcase {
+  private MiniKdc kdc;
+  private File workDir;
+  private Properties conf;
+
+  @Before
+  public void startMiniKdc() throws Exception {
+    createTestDir();
+    createMiniKdcConf();
+
+    kdc = new MiniKdc(conf, workDir);
+    kdc.start();
+  }
+
+  /**
+   * Create a working directory; it should be the build directory. Under
+   * this directory an ApacheDS working directory will be created, and that
+   * directory will be deleted when the MiniKdc stops.
+   */
+  public void createTestDir() {
+    workDir = new File(System.getProperty("test.dir", "target"));
+  }
+
+  /**
+   * Create a Kdc configuration
+   */
+  public void createMiniKdcConf() {
+    conf = MiniKdc.createConf();
+  }
+
+  @After
+  public void stopMiniKdc() {
+    if (kdc != null) {
+      kdc.stop();
+    }
+  }
+
+  public MiniKdc getKdc() {
+    return kdc;
+  }
+
+  public File getWorkDir() {
+    return workDir;
+  }
+
+  public Properties getConf() {
+    return conf;
+  }
+}
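
A minimal sketch of how a test could build on this base class (illustration only, not part of this change); the test name, keytab file name, and principal are placeholders:

import java.io.File;

import org.apache.hadoop.minikdc.KerberosSecurityTestcase;
import org.apache.hadoop.minikdc.MiniKdc;
import org.junit.Assert;
import org.junit.Test;

public class ExampleSecureTest extends KerberosSecurityTestcase {

  @Test
  public void testWithRunningKdc() throws Exception {
    // The KDC is already up: startMiniKdc() ran as the base class's @Before.
    MiniKdc kdc = getKdc();
    File keytab = new File(getWorkDir(), "example.keytab");
    kdc.createPrincipal(keytab, "example/localhost");
    Assert.assertTrue(keytab.exists());
    Assert.assertNotNull(kdc.getRealm());
  }
}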

+ 534 - 0
hadoop-common-project/hadoop-minikdc/src/main/java/org/apache/hadoop/minikdc/MiniKdc.java

@@ -0,0 +1,534 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.minikdc;
+import org.apache.commons.io.FileUtils;
+import org.apache.commons.io.IOUtils;
+import org.apache.commons.lang.text.StrSubstitutor;
+import org.apache.directory.api.ldap.model.schema.SchemaManager;
+import org.apache.directory.api.ldap.schemaextractor.SchemaLdifExtractor;
+import org.apache.directory.api.ldap.schemaextractor.impl.DefaultSchemaLdifExtractor;
+import org.apache.directory.api.ldap.schemaloader.LdifSchemaLoader;
+import org.apache.directory.api.ldap.schemamanager.impl.DefaultSchemaManager;
+import org.apache.directory.server.constants.ServerDNConstants;
+import org.apache.directory.server.core.DefaultDirectoryService;
+import org.apache.directory.server.core.api.CacheService;
+import org.apache.directory.server.core.api.DirectoryService;
+import org.apache.directory.server.core.api.InstanceLayout;
+import org.apache.directory.server.core.api.schema.SchemaPartition;
+import org.apache.directory.server.core.kerberos.KeyDerivationInterceptor;
+import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmIndex;
+import org.apache.directory.server.core.partition.impl.btree.jdbm.JdbmPartition;
+import org.apache.directory.server.core.partition.ldif.LdifPartition;
+import org.apache.directory.server.kerberos.kdc.KdcServer;
+import org.apache.directory.server.kerberos.shared.crypto.encryption.KerberosKeyFactory;
+import org.apache.directory.server.kerberos.shared.keytab.HackedKeytab;
+import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
+import org.apache.directory.server.protocol.shared.transport.TcpTransport;
+import org.apache.directory.server.protocol.shared.transport.UdpTransport;
+import org.apache.directory.shared.kerberos.KerberosTime;
+import org.apache.directory.shared.kerberos.codec.types.EncryptionType;
+import org.apache.directory.shared.kerberos.components.EncryptionKey;
+import org.apache.directory.api.ldap.model.entry.DefaultEntry;
+import org.apache.directory.api.ldap.model.entry.Entry;
+import org.apache.directory.api.ldap.model.ldif.LdifEntry;
+import org.apache.directory.api.ldap.model.ldif.LdifReader;
+import org.apache.directory.api.ldap.model.name.Dn;
+import org.apache.directory.api.ldap.model.schema.registries.SchemaLoader;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
+import java.io.BufferedReader;
+import java.io.File;
+import java.io.FileReader;
+import java.io.InputStream;
+import java.io.InputStreamReader;
+import java.io.StringReader;
+import java.net.InetAddress;
+import java.net.ServerSocket;
+import java.text.MessageFormat;
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
+import java.util.Map;
+import java.util.Properties;
+import java.util.Set;
+import java.util.UUID;
+
+/**
+ * Mini KDC based on Apache Directory Server that can be embedded in testcases
+ * or used from command line as a standalone KDC.
+ * <p/>
+ * <b>From within testcases:</b>
+ * <p/>
+ * MiniKdc sets 2 System properties when started and un-sets them when stopped:
+ * <ul>
+ *   <li>java.security.krb5.conf: set to the MiniKdc realm/host/port</li>
+ *   <li>sun.security.krb5.debug: set to the debug value provided in the
+ *   configuration</li>
+ * </ul>
+ * Because of this, multiple MiniKdc instances cannot be started in parallel
+ * (for example, by testcases that run in parallel and each start their own
+ * KDC). To run such testcases in parallel, a single MiniKdc should be shared
+ * by all of them.
+ * <p/>
+ * MiniKdc default configuration values are:
+ * <ul>
+ *   <li>org.name=EXAMPLE (used to create the REALM)</li>
+ *   <li>org.domain=COM (used to create the REALM)</li>
+ *   <li>kdc.bind.address=localhost</li>
+ *   <li>kdc.port=0 (ephemeral port)</li>
+ *   <li>instance=DefaultKrbServer</li>
+ *   <li>max.ticket.lifetime=86400000 (1 day)</li>
+ *   <li>max.renewable.lifetime=604800000 (7 days)</li>
+ *   <li>transport=TCP</li>
+ *   <li>debug=false</li>
+ * </ul>
+ * The generated krb5.conf forces TCP connections.
+ * <p/>
+ */
+public class MiniKdc {
+
+  public static void main(String[] args) throws  Exception {
+    if (args.length < 4) {
+      System.out.println("Arguments: <WORKDIR> <MINIKDCPROPERTIES> " +
+              "<KEYTABFILE> [<PRINCIPALS>]+");
+      System.exit(1);
+    }
+    File workDir = new File(args[0]);
+    if (!workDir.exists()) {
+      throw new RuntimeException("Specified work directory does not exists: "
+              + workDir.getAbsolutePath());
+    }
+    Properties conf = createConf();
+    File file = new File(args[1]);
+    if (!file.exists()) {
+      throw new RuntimeException("Specified configuration does not exists: "
+              + file.getAbsolutePath());
+    }
+    Properties userConf = new Properties();
+    userConf.load(new FileReader(file));
+    for (Map.Entry entry : userConf.entrySet()) {
+      conf.put(entry.getKey(), entry.getValue());
+    }
+    final MiniKdc miniKdc = new MiniKdc(conf, workDir);
+    miniKdc.start();
+    File krb5conf = new File(workDir, "krb5.conf");
+    if (miniKdc.getKrb5conf().renameTo(krb5conf)) {
+      File keytabFile = new File(args[2]).getAbsoluteFile();
+      String[] principals = new String[args.length - 3];
+      System.arraycopy(args, 3, principals, 0, args.length - 3);
+      miniKdc.createPrincipal(keytabFile, principals);
+      System.out.println();
+      System.out.println("Standalone MiniKdc Running");
+      System.out.println("---------------------------------------------------");
+      System.out.println("  Realm           : " + miniKdc.getRealm());
+      System.out.println("  Running at      : " + miniKdc.getHost() + ":" +
+              miniKdc.getPort());
+      System.out.println("  krb5conf        : " + krb5conf);
+      System.out.println();
+      System.out.println("  created keytab  : " + keytabFile);
+      System.out.println("  with principals : " + Arrays.asList(principals));
+      System.out.println();
+      System.out.println(" Do <CTRL-C> or kill <PID> to stop it");
+      System.out.println("---------------------------------------------------");
+      System.out.println();
+      Runtime.getRuntime().addShutdownHook(new Thread() {
+        @Override
+        public void run() {
+          miniKdc.stop();
+        }
+      });
+    } else {
+      throw new RuntimeException("Cannot rename KDC's krb5conf to "
+              + krb5conf.getAbsolutePath());
+    }
+  }
+
+  private static final Logger LOG = LoggerFactory.getLogger(MiniKdc.class);
+
+  public static final String ORG_NAME = "org.name";
+  public static final String ORG_DOMAIN = "org.domain";
+  public static final String KDC_BIND_ADDRESS = "kdc.bind.address";
+  public static final String KDC_PORT = "kdc.port";
+  public static final String INSTANCE = "instance";
+  public static final String MAX_TICKET_LIFETIME = "max.ticket.lifetime";
+  public static final String MAX_RENEWABLE_LIFETIME = "max.renewable.lifetime";
+  public static final String TRANSPORT = "transport";
+  public static final String DEBUG = "debug";
+
+  private static final Set<String> PROPERTIES = new HashSet<String>();
+  private static final Properties DEFAULT_CONFIG = new Properties();
+
+  static {
+    PROPERTIES.add(ORG_NAME);
+    PROPERTIES.add(ORG_DOMAIN);
+    PROPERTIES.add(KDC_BIND_ADDRESS);
+    PROPERTIES.add(KDC_PORT);
+    PROPERTIES.add(INSTANCE);
+    PROPERTIES.add(TRANSPORT);
+    PROPERTIES.add(MAX_TICKET_LIFETIME);
+    PROPERTIES.add(MAX_RENEWABLE_LIFETIME);
+
+    DEFAULT_CONFIG.setProperty(KDC_BIND_ADDRESS, "localhost");
+    DEFAULT_CONFIG.setProperty(KDC_PORT, "0");
+    DEFAULT_CONFIG.setProperty(INSTANCE, "DefaultKrbServer");
+    DEFAULT_CONFIG.setProperty(ORG_NAME, "EXAMPLE");
+    DEFAULT_CONFIG.setProperty(ORG_DOMAIN, "COM");
+    DEFAULT_CONFIG.setProperty(TRANSPORT, "TCP");
+    DEFAULT_CONFIG.setProperty(MAX_TICKET_LIFETIME, "86400000");
+    DEFAULT_CONFIG.setProperty(MAX_RENEWABLE_LIFETIME, "604800000");
+    DEFAULT_CONFIG.setProperty(DEBUG, "false");
+  }
+
+  /**
+   * Convenience method that returns the MiniKdc default configuration.
+   * <p/>
+   * The returned configuration is a copy; it can be customized before using
+   * it to create a MiniKdc.
+   * @return a MiniKdc default configuration.
+   */
+  public static Properties createConf() {
+    return (Properties) DEFAULT_CONFIG.clone();
+  }
+
+  private Properties conf;
+  private DirectoryService ds;
+  private KdcServer kdc;
+  private int port;
+  private String realm;
+  private File workDir;
+  private File krb5conf;
+
+  /**
+   * Creates a MiniKdc.
+   *
+   * @param conf MiniKdc configuration.
+   * @param workDir working directory; it should be the build directory. Under
+   * this directory an ApacheDS working directory will be created, and that
+   * directory will be deleted when the MiniKdc stops.
+   * @throws Exception thrown if the MiniKdc could not be created.
+   */
+  public MiniKdc(Properties conf, File workDir) throws Exception {
+    if (!conf.keySet().containsAll(PROPERTIES)) {
+      Set<String> missingProperties = new HashSet<String>(PROPERTIES);
+      missingProperties.removeAll(conf.keySet());
+      throw new IllegalArgumentException("Missing configuration properties: "
+              + missingProperties);
+    }
+    this.workDir = new File(workDir, Long.toString(System.currentTimeMillis()));
+    if (! workDir.exists()
+            && ! workDir.mkdirs()) {
+      throw new RuntimeException("Cannot create directory " + workDir);
+    }
+    LOG.info("Configuration:");
+    LOG.info("---------------------------------------------------------------");
+    for (Map.Entry entry : conf.entrySet()) {
+      LOG.info("  {}: {}", entry.getKey(), entry.getValue());
+    }
+    LOG.info("---------------------------------------------------------------");
+    this.conf = conf;
+    port = Integer.parseInt(conf.getProperty(KDC_PORT));
+    if (port == 0) {
+      ServerSocket ss = new ServerSocket(0, 1, InetAddress.getByName
+              (conf.getProperty(KDC_BIND_ADDRESS)));
+      port = ss.getLocalPort();
+      ss.close();
+    }
+    String orgName= conf.getProperty(ORG_NAME);
+    String orgDomain = conf.getProperty(ORG_DOMAIN);
+    realm = orgName.toUpperCase() + "." + orgDomain.toUpperCase();
+  }
+
+  /**
+   * Returns the port of the MiniKdc.
+   *
+   * @return the port of the MiniKdc.
+   */
+  public int getPort() {
+    return port;
+  }
+
+  /**
+   * Returns the host of the MiniKdc.
+   *
+   * @return the host of the MiniKdc.
+   */
+  public String getHost() {
+    return conf.getProperty(KDC_BIND_ADDRESS);
+  }
+
+  /**
+   * Returns the realm of the MiniKdc.
+   *
+   * @return the realm of the MiniKdc.
+   */
+  public String getRealm() {
+    return realm;
+  }
+
+  public File getKrb5conf() {
+    return krb5conf;
+  }
+
+  /**
+   * Starts the MiniKdc.
+   *
+   * @throws Exception thrown if the MiniKdc could not be started.
+   */
+  public synchronized void start() throws Exception {
+    if (kdc != null) {
+      throw new RuntimeException("Already started");
+    }
+    initDirectoryService();
+    initKDCServer();
+  }
+
+  @SuppressWarnings("unchecked")
+  private void initDirectoryService() throws Exception {
+    ds = new DefaultDirectoryService();
+    ds.setInstanceLayout(new InstanceLayout(workDir));
+
+    CacheService cacheService = new CacheService();
+    ds.setCacheService(cacheService);
+
+    // first load the schema
+    InstanceLayout instanceLayout = ds.getInstanceLayout();
+    File schemaPartitionDirectory = new File(
+            instanceLayout.getPartitionsDirectory(), "schema");
+    SchemaLdifExtractor extractor = new DefaultSchemaLdifExtractor(
+            instanceLayout.getPartitionsDirectory());
+    extractor.extractOrCopy();
+
+    SchemaLoader loader = new LdifSchemaLoader(schemaPartitionDirectory);
+    SchemaManager schemaManager = new DefaultSchemaManager(loader);
+    schemaManager.loadAllEnabled();
+    ds.setSchemaManager(schemaManager);
+    // Init the LdifPartition with schema
+    LdifPartition schemaLdifPartition = new LdifPartition(schemaManager);
+    schemaLdifPartition.setPartitionPath(schemaPartitionDirectory.toURI());
+
+    // The schema partition
+    SchemaPartition schemaPartition = new SchemaPartition(schemaManager);
+    schemaPartition.setWrappedPartition(schemaLdifPartition);
+    ds.setSchemaPartition(schemaPartition);
+
+    JdbmPartition systemPartition = new JdbmPartition(ds.getSchemaManager());
+    systemPartition.setId("system");
+    systemPartition.setPartitionPath(new File(
+            ds.getInstanceLayout().getPartitionsDirectory(),
+            systemPartition.getId()).toURI());
+    systemPartition.setSuffixDn(new Dn(ServerDNConstants.SYSTEM_DN));
+    systemPartition.setSchemaManager(ds.getSchemaManager());
+    ds.setSystemPartition(systemPartition);
+
+    ds.getChangeLog().setEnabled(false);
+    ds.setDenormalizeOpAttrsEnabled(true);
+    ds.addLast(new KeyDerivationInterceptor());
+
+    // create one partition
+    String orgName= conf.getProperty(ORG_NAME).toLowerCase();
+    String orgDomain = conf.getProperty(ORG_DOMAIN).toLowerCase();
+
+    JdbmPartition partition = new JdbmPartition(ds.getSchemaManager());
+    partition.setId(orgName);
+    partition.setPartitionPath(new File(
+            ds.getInstanceLayout().getPartitionsDirectory(), orgName).toURI());
+    partition.setSuffixDn(new Dn("dc=" + orgName + ",dc=" + orgDomain));
+    ds.addPartition(partition);
+    // indexes
+    Set indexedAttributes = new HashSet();
+    indexedAttributes.add(new JdbmIndex<String, Entry>("objectClass", false));
+    indexedAttributes.add(new JdbmIndex<String, Entry>("dc", false));
+    indexedAttributes.add(new JdbmIndex<String, Entry>("ou", false));
+    partition.setIndexedAttributes(indexedAttributes);
+
+    // And start the ds
+    ds.setInstanceId(conf.getProperty(INSTANCE));
+    ds.startup();
+    // context entry, after ds.startup()
+    Dn dn = new Dn("dc=" + orgName + ",dc=" + orgDomain);
+    Entry entry = ds.newEntry(dn);
+    entry.add("objectClass", "top", "domain");
+    entry.add("dc", orgName);
+    ds.getAdminSession().add(entry);
+  }
+
+  private void initKDCServer() throws Exception {
+    String orgName= conf.getProperty(ORG_NAME);
+    String orgDomain = conf.getProperty(ORG_DOMAIN);
+    String bindAddress = conf.getProperty(KDC_BIND_ADDRESS);
+    final Map<String, String> map = new HashMap<String, String>();
+    map.put("0", orgName.toLowerCase());
+    map.put("1", orgDomain.toLowerCase());
+    map.put("2", orgName.toUpperCase());
+    map.put("3", orgDomain.toUpperCase());
+    map.put("4", bindAddress);
+
+    ClassLoader cl = Thread.currentThread().getContextClassLoader();
+    InputStream is = cl.getResourceAsStream("minikdc.ldiff");
+
+    SchemaManager schemaManager = ds.getSchemaManager();
+    final String content = StrSubstitutor.replace(IOUtils.toString(is), map);
+    LdifReader reader = new LdifReader(new StringReader(content));
+    for (LdifEntry ldifEntry : reader) {
+      ds.getAdminSession().add(new DefaultEntry(schemaManager,
+              ldifEntry.getEntry()));
+    }
+
+    kdc = new KdcServer();
+    kdc.setDirectoryService(ds);
+
+    // transport
+    String transport = conf.getProperty(TRANSPORT);
+    if (transport.trim().equals("TCP")) {
+      kdc.addTransports(new TcpTransport(bindAddress, port, 3, 50));
+    } else if (transport.trim().equals("UDP")) {
+      kdc.addTransports(new UdpTransport(port));
+    } else {
+      throw new IllegalArgumentException("Invalid transport: " + transport);
+    }
+    kdc.setServiceName(conf.getProperty(INSTANCE));
+    kdc.getConfig().setMaximumRenewableLifetime(
+            Long.parseLong(conf.getProperty(MAX_RENEWABLE_LIFETIME)));
+    kdc.getConfig().setMaximumTicketLifetime(
+            Long.parseLong(conf.getProperty(MAX_TICKET_LIFETIME)));
+
+    kdc.getConfig().setPaEncTimestampRequired(false);
+    kdc.start();
+
+    StringBuilder sb = new StringBuilder();
+    is = cl.getResourceAsStream("minikdc-krb5.conf");
+    BufferedReader r = new BufferedReader(new InputStreamReader(is));
+    String line = r.readLine();
+    while (line != null) {
+      sb.append(line).append("{3}");
+      line = r.readLine();
+    }
+    r.close();
+    krb5conf = new File(workDir, "krb5.conf").getAbsoluteFile();
+    FileUtils.writeStringToFile(krb5conf,
+            MessageFormat.format(sb.toString(), getRealm(), getHost(),
+                    Integer.toString(getPort()), System.getProperty("line.separator")));
+    System.setProperty("java.security.krb5.conf", krb5conf.getAbsolutePath());
+
+    System.setProperty("sun.security.krb5.debug", conf.getProperty(DEBUG,
+            "false"));
+    LOG.info("MiniKdc listening at port: {}", getPort());
+    LOG.info("MiniKdc setting JVM krb5.conf to: {}",
+            krb5conf.getAbsolutePath());
+  }
+
+  /**
+   * Stops the MiniKdc, shutting down the KDC and the directory service and
+   * deleting the working directory.
+   */
+  public synchronized void stop() {
+    if (kdc != null) {
+      System.getProperties().remove("java.security.krb5.conf");
+      System.getProperties().remove("sun.security.krb5.debug");
+      kdc.stop();
+      try {
+        ds.shutdown();
+      } catch (Exception ex) {
+        LOG.error("Could not shutdown ApacheDS properly: {}", ex.toString(),
+                ex);
+      }
+    }
+    delete(workDir);
+  }
+
+  private void delete(File f) {
+    if (f.isFile()) {
+      if (! f.delete()) {
+        LOG.warn("WARNING: cannot delete file " + f.getAbsolutePath());
+      }
+    } else {
+      for (File c: f.listFiles()) {
+        delete(c);
+      }
+      if (! f.delete()) {
+        LOG.warn("WARNING: cannot delete directory " + f.getAbsolutePath());
+      }
+    }
+  }
+
+  /**
+   * Creates a principal in the KDC with the specified user and password.
+   *
+   * @param principal principal name, do not include the domain.
+   * @param password password.
+   * @throws Exception thrown if the principal could not be created.
+   */
+  public synchronized void createPrincipal(String principal, String password)
+          throws Exception {
+    String orgName= conf.getProperty(ORG_NAME);
+    String orgDomain = conf.getProperty(ORG_DOMAIN);
+    String baseDn = "ou=users,dc=" + orgName.toLowerCase() + ",dc=" +
+            orgDomain.toLowerCase();
+    String content = "dn: uid=" + principal + "," + baseDn + "\n" +
+            "objectClass: top\n" +
+            "objectClass: person\n" +
+            "objectClass: inetOrgPerson\n" +
+            "objectClass: krb5principal\n" +
+            "objectClass: krb5kdcentry\n" +
+            "cn: " + principal + "\n" +
+            "sn: " + principal + "\n" +
+            "uid: " + principal + "\n" +
+            "userPassword: " + password + "\n" +
+            "krb5PrincipalName: " + principal + "@" + getRealm() + "\n" +
+            "krb5KeyVersionNumber: 0";
+
+    for (LdifEntry ldifEntry : new LdifReader(new StringReader(content))) {
+      ds.getAdminSession().add(new DefaultEntry(ds.getSchemaManager(),
+              ldifEntry.getEntry()));
+    }
+  }
+
+  /**
+   * Creates multiple principals in the KDC and adds them to a keytab file.
+   *
+   * @param keytabFile keytab file to add the created principals.
+   * @param principals principals to add to the KDC, do not include the domain.
+   * @throws Exception thrown if the principals or the keytab file could not be
+   * created.
+   */
+  public void createPrincipal(File keytabFile, String ... principals)
+          throws Exception {
+    String generatedPassword = UUID.randomUUID().toString();
+    HackedKeytab keytab = new HackedKeytab();
+    List<KeytabEntry> entries = new ArrayList<KeytabEntry>();
+    for (String principal : principals) {
+      createPrincipal(principal, generatedPassword);
+      principal = principal + "@" + getRealm();
+      KerberosTime timestamp = new KerberosTime();
+      for (Map.Entry<EncryptionType, EncryptionKey> entry : KerberosKeyFactory
+              .getKerberosKeys(principal, generatedPassword).entrySet()) {
+        EncryptionKey ekey = entry.getValue();
+        byte keyVersion = (byte) ekey.getKeyVersion();
+        entries.add(new KeytabEntry(principal, 1L, timestamp, keyVersion,
+                ekey));
+      }
+    }
+    keytab.setEntries(entries);
+    keytab.write(keytabFile, principals.length);
+  }
+}
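
For completeness, a minimal sketch of embedding MiniKdc directly instead of going through KerberosSecurityTestcase (illustration only, not part of this change); the work directory, keytab name, and principals are placeholders:

import java.io.File;
import java.util.Properties;

import org.apache.hadoop.minikdc.MiniKdc;

public class MiniKdcEmbeddingSketch {
  public static void main(String[] args) throws Exception {
    Properties conf = MiniKdc.createConf();   // copy of the defaults listed in the javadoc above
    conf.setProperty(MiniKdc.DEBUG, "true");  // optional: enable sun.security.krb5.debug
    File workDir = new File(System.getProperty("test.dir", "target"));
    MiniKdc kdc = new MiniKdc(conf, workDir);
    kdc.start();
    try {
      File keytab = new File(workDir, "example.keytab");
      kdc.createPrincipal(keytab, "example/localhost", "HTTP/localhost");
      System.out.println("KDC for realm " + kdc.getRealm()
          + " running at " + kdc.getHost() + ":" + kdc.getPort());
    } finally {
      kdc.stop();
    }
  }
}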

+ 31 - 0
hadoop-common-project/hadoop-minikdc/src/main/resources/log4j.properties

@@ -0,0 +1,31 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+# STDOUT Appender
+log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+log4j.appender.stdout.Target=System.err
+log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+log4j.appender.stdout.layout.ConversionPattern=%d{ISO8601} %-5p %c{1} - %m%n
+
+log4j.rootLogger=INFO, stdout
+
+# Switching off most of Apache DS logging which is QUITE verbose
+log4j.logger.org.apache.directory=OFF
+log4j.logger.org.apache.directory.server.kerberos=INFO, stdout
+log4j.additivity.org.apache.directory=false
+log4j.logger.net.sf.ehcache=INFO, stdout

+ 25 - 0
hadoop-common-project/hadoop-minikdc/src/main/resources/minikdc-krb5.conf

@@ -0,0 +1,25 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+[libdefaults]
+    default_realm = {0}
+    udp_preference_limit = 1
+
+[realms]
+    {0} = '{'
+        kdc = {1}:{2}
+    '}'
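
The single quotes around the braces above are java.text.MessageFormat escapes for literal '{' and '}': MiniKdc.initKDCServer() reads this template, appends the {3} line-separator slot to every line, and expands it with MessageFormat.format. A standalone sketch of that substitution with placeholder values (illustration only, not part of this change):

import java.text.MessageFormat;

public class Krb5ConfTemplateSketch {
  public static void main(String[] args) {
    // One line per template line, each followed by the {3} line-separator slot,
    // mirroring how initKDCServer builds the format string.
    String template =
        "[libdefaults]{3}"
        + "    default_realm = {0}{3}"
        + "    udp_preference_limit = 1{3}"
        + "{3}"
        + "[realms]{3}"
        + "    {0} = '{'{3}"
        + "        kdc = {1}:{2}{3}"
        + "    '}'{3}";
    System.out.println(MessageFormat.format(template,
        "EXAMPLE.COM",               // {0} realm
        "localhost",                 // {1} KDC host
        "60088",                     // {2} KDC port (placeholder)
        System.lineSeparator()));    // {3} line separator
  }
}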

+ 47 - 0
hadoop-common-project/hadoop-minikdc/src/main/resources/minikdc.ldiff

@@ -0,0 +1,47 @@
+#
+# Licensed to the Apache Software Foundation (ASF) under one
+# or more contributor license agreements.  See the NOTICE file
+# distributed with this work for additional information
+# regarding copyright ownership.  The ASF licenses this file
+# to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance
+# with the License.  You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+dn: ou=users,dc=${0},dc=${1}
+objectClass: organizationalUnit
+objectClass: top
+ou: users
+
+dn: uid=krbtgt,ou=users,dc=${0},dc=${1}
+objectClass: top
+objectClass: person
+objectClass: inetOrgPerson
+objectClass: krb5principal
+objectClass: krb5kdcentry
+cn: KDC Service
+sn: Service
+uid: krbtgt
+userPassword: secret
+krb5PrincipalName: krbtgt/${2}.${3}@${2}.${3}
+krb5KeyVersionNumber: 0
+
+dn: uid=ldap,ou=users,dc=${0},dc=${1}
+objectClass: top
+objectClass: person
+objectClass: inetOrgPerson
+objectClass: krb5principal
+objectClass: krb5kdcentry
+cn: LDAP
+sn: Service
+uid: ldap
+userPassword: secret
+krb5PrincipalName: ldap/${4}@${2}.${3}
+krb5KeyVersionNumber: 0
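
The ${N} placeholders in this LDIF are resolved by MiniKdc.initKDCServer() with commons-lang StrSubstitutor before the entries are added to the directory. A small sketch of that substitution with placeholder values (illustration only, not part of this change):

import java.util.HashMap;
import java.util.Map;

import org.apache.commons.lang.text.StrSubstitutor;

public class LdiffTemplateSketch {
  public static void main(String[] args) {
    Map<String, String> map = new HashMap<String, String>();
    map.put("0", "example");    // ${0}: org name, lower case
    map.put("1", "com");        // ${1}: org domain, lower case
    map.put("2", "EXAMPLE");    // ${2}: org name, upper case
    map.put("3", "COM");        // ${3}: org domain, upper case
    map.put("4", "localhost");  // ${4}: KDC bind address
    String line = StrSubstitutor.replace(
        "krb5PrincipalName: ldap/${4}@${2}.${3}", map);
    // Prints: krb5PrincipalName: ldap/localhost@EXAMPLE.COM
    System.out.println(line);
  }
}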

+ 163 - 0
hadoop-common-project/hadoop-minikdc/src/test/java/org/apache/hadoop/minikdc/TestMiniKdc.java

@@ -0,0 +1,163 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.minikdc;
+
+import org.apache.directory.server.kerberos.shared.keytab.Keytab;
+import org.apache.directory.server.kerberos.shared.keytab.KeytabEntry;
+import org.junit.Assert;
+import org.junit.Test;
+
+import javax.security.auth.Subject;
+import javax.security.auth.kerberos.KerberosPrincipal;
+import javax.security.auth.login.AppConfigurationEntry;
+import javax.security.auth.login.Configuration;
+import javax.security.auth.login.LoginContext;
+import java.io.File;
+import java.security.Principal;
+import java.util.*;
+
+public class TestMiniKdc extends KerberosSecurityTestcase {
+
+  @Test
+  public void testMiniKdcStart() {
+    MiniKdc kdc = getKdc();
+    Assert.assertNotSame(0, kdc.getPort());
+  }
+
+  @Test
+  public void testKeytabGen() throws Exception {
+    MiniKdc kdc = getKdc();
+    File workDir = getWorkDir();
+
+    kdc.createPrincipal(new File(workDir, "keytab"), "foo/bar", "bar/foo");
+    Keytab kt = Keytab.read(new File(workDir, "keytab"));
+    Set<String> principals = new HashSet<String>();
+    for (KeytabEntry entry : kt.getEntries()) {
+      principals.add(entry.getPrincipalName());
+    }
+    //here principals use \ instead of /
+    //because org.apache.directory.server.kerberos.shared.keytab.KeytabDecoder
+    // .getPrincipalName(IoBuffer buffer) uses \\ when it generates the principal name
+    Assert.assertEquals(new HashSet<String>(Arrays.asList(
+            "foo\\bar@" + kdc.getRealm(), "bar\\foo@" + kdc.getRealm())),
+            principals);
+  }
+
+  private static class KerberosConfiguration extends Configuration {
+    private String principal;
+    private String keytab;
+    private boolean isInitiator;
+
+    private KerberosConfiguration(String principal, File keytab,
+                                  boolean client) {
+      this.principal = principal;
+      this.keytab = keytab.getAbsolutePath();
+      this.isInitiator = client;
+    }
+
+    public static Configuration createClientConfig(String principal,
+                                                   File keytab) {
+      return new KerberosConfiguration(principal, keytab, true);
+    }
+
+    public static Configuration createServerConfig(String principal,
+                                                   File keytab) {
+      return new KerberosConfiguration(principal, keytab, false);
+    }
+
+    private static String getKrb5LoginModuleName() {
+      return System.getProperty("java.vendor").contains("IBM")
+              ? "com.ibm.security.auth.module.Krb5LoginModule"
+              : "com.sun.security.auth.module.Krb5LoginModule";
+    }
+
+    @Override
+    public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
+      Map<String, String> options = new HashMap<String, String>();
+      options.put("keyTab", keytab);
+      options.put("principal", principal);
+      options.put("useKeyTab", "true");
+      options.put("storeKey", "true");
+      options.put("doNotPrompt", "true");
+      options.put("useTicketCache", "true");
+      options.put("renewTGT", "true");
+      options.put("refreshKrb5Config", "true");
+      options.put("isInitiator", Boolean.toString(isInitiator));
+      String ticketCache = System.getenv("KRB5CCNAME");
+      if (ticketCache != null) {
+        options.put("ticketCache", ticketCache);
+      }
+      options.put("debug", "true");
+
+      return new AppConfigurationEntry[]{
+              new AppConfigurationEntry(getKrb5LoginModuleName(),
+                      AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
+                      options)};
+    }
+  }
+
+  @Test
+  public void testKerberosLogin() throws Exception {
+    MiniKdc kdc = getKdc();
+    File workDir = getWorkDir();
+    LoginContext loginContext = null;
+    try {
+      String principal = "foo";
+      File keytab = new File(workDir, "foo.keytab");
+      kdc.createPrincipal(keytab, principal);
+
+      Set<Principal> principals = new HashSet<Principal>();
+      principals.add(new KerberosPrincipal(principal));
+
+      //client login
+      Subject subject = new Subject(false, principals, new HashSet<Object>(),
+              new HashSet<Object>());
+      loginContext = new LoginContext("", subject, null,
+              KerberosConfiguration.createClientConfig(principal, keytab));
+      loginContext.login();
+      subject = loginContext.getSubject();
+      Assert.assertEquals(1, subject.getPrincipals().size());
+      Assert.assertEquals(KerberosPrincipal.class,
+              subject.getPrincipals().iterator().next().getClass());
+      Assert.assertEquals(principal + "@" + kdc.getRealm(),
+              subject.getPrincipals().iterator().next().getName());
+      loginContext.login();
+
+      //server login
+      subject = new Subject(false, principals, new HashSet<Object>(),
+              new HashSet<Object>());
+      loginContext = new LoginContext("", subject, null,
+              KerberosConfiguration.createServerConfig(principal, keytab));
+      loginContext.login();
+      subject = loginContext.getSubject();
+      Assert.assertEquals(1, subject.getPrincipals().size());
+      Assert.assertEquals(KerberosPrincipal.class,
+              subject.getPrincipals().iterator().next().getClass());
+      Assert.assertEquals(principal + "@" + kdc.getRealm(),
+              subject.getPrincipals().iterator().next().getName());
+      loginContext.login();
+
+    } finally {
+      if (loginContext != null) {
+        loginContext.logout();
+      }
+    }
+  }
+
+}

+ 1 - 0
hadoop-common-project/pom.xml

@@ -36,6 +36,7 @@
     <module>hadoop-common</module>
     <module>hadoop-annotations</module>
     <module>hadoop-nfs</module>
+    <module>hadoop-minikdc</module>
   </modules>
 
   <build>

+ 11 - 0
hadoop-hdfs-project/hadoop-hdfs/CHANGES.txt

@@ -265,6 +265,11 @@ Release 2.3.0 - UNRELEASED
     HDFS-5035.  getFileLinkStatus and rename do not correctly check permissions
     of symlinks.  (Andrew Wang via Colin Patrick McCabe)
 
+    HDFS-5065. TestSymlinkHdfsDisable fails on Windows. (ivanmi)
+
+    HDFS-4816. transitionToActive blocks if the SBN is doing checkpoint image
+    transfer. (Andrew Wang)
+
 Release 2.1.1-beta - UNRELEASED
 
   INCOMPATIBLE CHANGES
@@ -304,6 +309,12 @@ Release 2.1.1-beta - UNRELEASED
     HDFS-5043. For HdfsFileStatus, set default value of childrenNum to -1
     instead of 0 to avoid confusing applications. (brandonli)
 
+    HDFS-4993. Fsck can fail if a file is renamed or deleted. (Robert Parker
+    via kihwal)
+
+    HDFS-5091. Support for spnego keytab separate from the JournalNode keytab
+    for secure HA. (jing9)
+
 Release 2.1.0-beta - 2013-08-06
 
   INCOMPATIBLE CHANGES

+ 8 - 0
hadoop-hdfs-project/hadoop-hdfs/pom.xml

@@ -417,6 +417,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -441,6 +443,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -462,6 +466,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>
@@ -483,6 +489,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/src/main/proto</param>

+ 2 - 0
hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml

@@ -103,6 +103,8 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
               <goal>protoc</goal>
             </goals>
             <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
               <imports>
                 <param>${basedir}/../../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                 <param>${basedir}/../../../../../hadoop-hdfs-project/hadoop-hdfs/src/main/proto</param>

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNodeHttpServer.java

@@ -31,6 +31,7 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.server.common.JspHelper;
 import org.apache.hadoop.http.HttpServer;
 import org.apache.hadoop.net.NetUtils;
@@ -74,7 +75,7 @@ public class JournalNodeHttpServer {
      {
        if (UserGroupInformation.isSecurityEnabled()) {
          initSpnego(conf, DFS_JOURNALNODE_INTERNAL_SPNEGO_USER_NAME_KEY,
-              DFS_JOURNALNODE_KEYTAB_FILE_KEY);
+              DFSUtil.getSpnegoKeytabKey(conf, DFS_JOURNALNODE_KEYTAB_FILE_KEY));
        }
      }
    };

+ 8 - 3
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/NamenodeFsck.java

@@ -142,7 +142,7 @@ public class NamenodeFsck {
  /**
   * Filesystem checker.
   * @param conf configuration (namenode config)
-   * @param nn namenode that this fsck is going to use
+   * @param namenode namenode that this fsck is going to use
   * @param pmap key=value[] map passed to the http servlet as url parameters
   * @param out output stream to write the fsck output
   * @param totalDatanodes number of live datanodes
@@ -302,8 +302,13 @@ public class NamenodeFsck {
    long fileLen = file.getLen();
    // Get block locations without updating the file access time 
    // and without block access tokens
-    LocatedBlocks blocks = namenode.getNamesystem().getBlockLocations(path, 0,
-        fileLen, false, false, false);
+    LocatedBlocks blocks;
+    try {
+      blocks = namenode.getNamesystem().getBlockLocations(path, 0,
+          fileLen, false, false, false);
+    } catch (FileNotFoundException fnfe) {
+      blocks = null;
+    }
    if (blocks == null) { // the file is deleted
      return;
    }

+ 34 - 5
hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/ha/StandbyCheckpointer.java

@@ -17,9 +17,17 @@
 */
 package org.apache.hadoop.hdfs.server.namenode.ha;

+import static org.apache.hadoop.util.Time.now;
+
 import java.io.IOException;
 import java.net.InetSocketAddress;
 import java.security.PrivilegedAction;
+import java.util.concurrent.Callable;
+import java.util.concurrent.ExecutionException;
+import java.util.concurrent.ExecutorService;
+import java.util.concurrent.Executors;
+import java.util.concurrent.Future;
+import java.util.concurrent.ThreadFactory;

 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
@@ -38,10 +46,10 @@ import org.apache.hadoop.hdfs.util.Canceler;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SecurityUtil;
 import org.apache.hadoop.security.UserGroupInformation;
-import static org.apache.hadoop.util.Time.now;

 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.base.Preconditions;
+import com.google.common.util.concurrent.ThreadFactoryBuilder;

 /**
  * Thread which runs inside the NN when it's in Standby state,
@@ -57,6 +65,7 @@ public class StandbyCheckpointer {
   private final FSNamesystem namesystem;
   private long lastCheckpointTime;
   private final CheckpointerThread thread;
+  private final ThreadFactory uploadThreadFactory;
   private String activeNNAddress;
   private InetSocketAddress myNNAddress;

@@ -72,6 +81,8 @@ public class StandbyCheckpointer {
     this.namesystem = ns;
     this.checkpointConf = new CheckpointConf(conf); 
     this.thread = new CheckpointerThread();
+    this.uploadThreadFactory = new ThreadFactoryBuilder().setDaemon(true)
+        .setNameFormat("TransferFsImageUpload-%d").build();

     setNameNodeAddresses(conf);
   }
@@ -142,7 +153,7 @@

   private void doCheckpoint() throws InterruptedException, IOException {
     assert canceler != null;
-    long txid;
+    final long txid;
    
     namesystem.writeLockInterruptibly();
     try {
@@ -171,9 +182,26 @@ public class StandbyCheckpointer {
     }
    
     // Upload the saved checkpoint back to the active
-    TransferFsImage.uploadImageFromStorage(
-        activeNNAddress, myNNAddress,
-        namesystem.getFSImage().getStorage(), txid);
+    // Do this in a separate thread to avoid blocking transition to active
+    // See HDFS-4816
+    ExecutorService executor =
+        Executors.newSingleThreadExecutor(uploadThreadFactory);
+    Future<Void> upload = executor.submit(new Callable<Void>() {
+      @Override
+      public Void call() throws IOException {
+        TransferFsImage.uploadImageFromStorage(
+            activeNNAddress, myNNAddress,
+            namesystem.getFSImage().getStorage(), txid);
+        return null;
+      }
+    });
+    executor.shutdown();
+    try {
+      upload.get();
+    } catch (ExecutionException e) {
+      throw new IOException("Exception during image upload: " + e.getMessage(),
+          e.getCause());
+    }
   }
  
   /**
@@ -301,6 +329,7 @@ public class StandbyCheckpointer {
          LOG.info("Checkpoint was cancelled: " + ce.getMessage());
          canceledCount++;
        } catch (InterruptedException ie) {
+          LOG.info("Interrupted during checkpointing", ie);
          // Probably requested shutdown.
          continue;
        } catch (Throwable t) {

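Note on the StandbyCheckpointer hunk above: the upload now runs on a single-threaded daemon executor, so the checkpointer thread only blocks on a Future and can be interrupted while a transfer is still in flight (the HDFS-4816 comment in the patch). The following is a minimal standalone sketch of that pattern, not the HDFS code itself: uploadImage() is a hypothetical stand-in for TransferFsImage.uploadImageFromStorage(), and a plain ThreadFactory replaces Guava's ThreadFactoryBuilder.

import java.io.IOException;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.ThreadFactory;

public class BackgroundUploadSketch {

  // Placeholder for the blocking HTTP upload that the real code performs
  // via TransferFsImage.uploadImageFromStorage(...).
  static void uploadImage(long txid) throws IOException {
    System.out.println("uploading checkpoint for txid " + txid);
  }

  static void doUpload(final long txid) throws IOException, InterruptedException {
    // Daemon thread factory, similar in spirit to the ThreadFactoryBuilder
    // used in the patch.
    ThreadFactory daemonFactory = new ThreadFactory() {
      @Override
      public Thread newThread(Runnable r) {
        Thread t = new Thread(r, "TransferFsImageUpload");
        t.setDaemon(true);
        return t;
      }
    };
    ExecutorService executor = Executors.newSingleThreadExecutor(daemonFactory);
    Future<Void> upload = executor.submit(new Callable<Void>() {
      @Override
      public Void call() throws IOException {
        uploadImage(txid);
        return null;
      }
    });
    executor.shutdown(); // accept no new tasks; the submitted upload still runs
    try {
      // The caller blocks only on the Future, so an interrupt aborts this
      // wait without the caller having to ride out the transfer itself.
      upload.get();
    } catch (ExecutionException e) {
      throw new IOException("Exception during image upload: " + e.getMessage(),
          e.getCause());
    }
  }

  public static void main(String[] args) throws Exception {
    doUpload(104L);
  }
}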
+ 3 - 2
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestGlobPaths.java

@@ -405,6 +405,8 @@ public class TestGlobPaths {

    status = fs.globStatus(new Path("/x/x"), falseFilter);
    assertNull(status);
+
+    cleanupDFS();
  }
  
  private void checkStatus(FileStatus[] status, Path ... expectedMatches) {
@@ -783,8 +785,7 @@ public class TestGlobPaths {
    return globResults;
  }
  
-  @After
-  public void cleanupDFS() throws IOException {
+  private void cleanupDFS() throws IOException {
    fs.delete(new Path(USER_DIR), true);
  }
  

+ 2 - 1
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/TestSymlinkHdfsDisable.java

@@ -42,7 +42,8 @@ public class TestSymlinkHdfsDisable {
    DistributedFileSystem dfs = cluster.getFileSystem();
    FileContext fc = FileContext.getFileContext(cluster.getURI(0), conf);
    // Create test files/links
-    FileContextTestHelper helper = new FileContextTestHelper();
+    FileContextTestHelper helper = new FileContextTestHelper(
+        "/tmp/TestSymlinkHdfsDisable");
    Path root = helper.getTestRootPath(fc);
    Path target = new Path(root, "target");
    Path link = new Path(root, "link");

+ 218 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/fs/shell/TestTextCommand.java

@@ -0,0 +1,218 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.fs.shell;
+
+import static org.junit.Assert.assertEquals;
+import static org.junit.Assert.assertTrue;
+
+import java.io.IOException;
+import java.io.InputStream;
+import java.io.StringWriter;
+import java.lang.reflect.Method;
+
+import org.apache.commons.io.IOUtils;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.fs.FSDataOutputStream;
+import org.apache.hadoop.fs.FileSystem;
+import org.apache.hadoop.fs.Path;
+import org.apache.hadoop.hdfs.HdfsConfiguration;
+import org.apache.hadoop.hdfs.MiniDFSCluster;
+import org.junit.After;
+import org.junit.Before;
+import org.junit.Test;
+
+
+/**
+ * This class tests the logic for displaying the binary formats supported
+ * by the Text command.
+ */
+public class TestTextCommand {
+  private static final String TEST_ROOT_DIR =
+    System.getProperty("test.build.data", "build/test/data/") + "/testText";
+  private static final Path AVRO_FILENAME = new Path(TEST_ROOT_DIR, "weather.avro");
+  private static MiniDFSCluster cluster;
+  private static FileSystem fs;
+  
+  @Before
+    public void setUp() throws IOException{
+    Configuration conf = new HdfsConfiguration();
+    cluster = new MiniDFSCluster.Builder(conf).build();
+    cluster.waitActive();
+    fs = cluster.getFileSystem();
+  }
+
+  @After
+    public void tearDown() throws IOException{
+    if(fs != null){
+      fs.close();
+    }
+    if(cluster != null){
+      cluster.shutdown();
+    }
+  }
+  
+  /**
+   * Tests whether binary Avro data files are displayed correctly.
+   */
+  @Test
+    public void testDisplayForAvroFiles() throws Exception {
+    // Create a small Avro data file on the HDFS.
+    createAvroFile(generateWeatherAvroBinaryData());
+
+    // Prepare and call the Text command's protected getInputStream method
+    // using reflection.
+    Configuration conf = fs.getConf();
+    PathData pathData = new PathData(AVRO_FILENAME.toString(), conf);
+    Display.Text text = new Display.Text();
+    text.setConf(conf);
+    Method method = text.getClass().getDeclaredMethod(
+                                                      "getInputStream", PathData.class);
+    method.setAccessible(true);
+    InputStream stream = (InputStream) method.invoke(text, pathData);
+    String output = inputStreamToString(stream);
+
+    // Check the output.
+    String expectedOutput =
+      "{\"station\":\"011990-99999\",\"time\":-619524000000,\"temp\":0}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"011990-99999\",\"time\":-619506000000,\"temp\":22}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"011990-99999\",\"time\":-619484400000,\"temp\":-11}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"012650-99999\",\"time\":-655531200000,\"temp\":111}" +
+      System.getProperty("line.separator") +
+      "{\"station\":\"012650-99999\",\"time\":-655509600000,\"temp\":78}" +
+      System.getProperty("line.separator");
+
+    assertEquals(expectedOutput, output);
+  }
+
+  private String inputStreamToString(InputStream stream) throws IOException {
+    StringWriter writer = new StringWriter();
+    IOUtils.copy(stream, writer);
+    return writer.toString();
+  }
+
+  private void createAvroFile(byte[] contents) throws IOException {
+    FSDataOutputStream stream = fs.create(AVRO_FILENAME);
+    stream.write(contents);
+    stream.close();
+    assertTrue(fs.exists(AVRO_FILENAME));
+  }
+
+  private byte[] generateWeatherAvroBinaryData() {
+    // The contents of a simple binary Avro file with weather records.
+    byte[] contents = {
+      (byte) 0x4f, (byte) 0x62, (byte) 0x6a, (byte)  0x1,
+      (byte)  0x4, (byte) 0x14, (byte) 0x61, (byte) 0x76,
+      (byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x63,
+      (byte) 0x6f, (byte) 0x64, (byte) 0x65, (byte) 0x63,
+      (byte)  0x8, (byte) 0x6e, (byte) 0x75, (byte) 0x6c,
+      (byte) 0x6c, (byte) 0x16, (byte) 0x61, (byte) 0x76,
+      (byte) 0x72, (byte) 0x6f, (byte) 0x2e, (byte) 0x73,
+      (byte) 0x63, (byte) 0x68, (byte) 0x65, (byte) 0x6d,
+      (byte) 0x61, (byte) 0xf2, (byte)  0x2, (byte) 0x7b,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x72, (byte) 0x65, (byte) 0x63, (byte) 0x6f,
+      (byte) 0x72, (byte) 0x64, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x57, (byte) 0x65, (byte) 0x61, (byte) 0x74,
+      (byte) 0x68, (byte) 0x65, (byte) 0x72, (byte) 0x22,
+      (byte) 0x2c, (byte) 0x22, (byte) 0x6e, (byte) 0x61,
+      (byte) 0x6d, (byte) 0x65, (byte) 0x73, (byte) 0x70,
+      (byte) 0x61, (byte) 0x63, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x65,
+      (byte) 0x73, (byte) 0x74, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x66, (byte) 0x69, (byte) 0x65,
+      (byte) 0x6c, (byte) 0x64, (byte) 0x73, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x5b, (byte) 0x7b, (byte) 0x22,
+      (byte) 0x6e, (byte) 0x61, (byte) 0x6d, (byte) 0x65,
+      (byte) 0x22, (byte) 0x3a, (byte) 0x22, (byte) 0x73,
+      (byte) 0x74, (byte) 0x61, (byte) 0x74, (byte) 0x69,
+      (byte) 0x6f, (byte) 0x6e, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x73, (byte) 0x74, (byte) 0x72, (byte) 0x69,
+      (byte) 0x6e, (byte) 0x67, (byte) 0x22, (byte) 0x7d,
+      (byte) 0x2c, (byte) 0x7b, (byte) 0x22, (byte) 0x6e,
+      (byte) 0x61, (byte) 0x6d, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x74, (byte) 0x69,
+      (byte) 0x6d, (byte) 0x65, (byte) 0x22, (byte) 0x2c,
+      (byte) 0x22, (byte) 0x74, (byte) 0x79, (byte) 0x70,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x6c, (byte) 0x6f, (byte) 0x6e, (byte) 0x67,
+      (byte) 0x22, (byte) 0x7d, (byte) 0x2c, (byte) 0x7b,
+      (byte) 0x22, (byte) 0x6e, (byte) 0x61, (byte) 0x6d,
+      (byte) 0x65, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x74, (byte) 0x65, (byte) 0x6d, (byte) 0x70,
+      (byte) 0x22, (byte) 0x2c, (byte) 0x22, (byte) 0x74,
+      (byte) 0x79, (byte) 0x70, (byte) 0x65, (byte) 0x22,
+      (byte) 0x3a, (byte) 0x22, (byte) 0x69, (byte) 0x6e,
+      (byte) 0x74, (byte) 0x22, (byte) 0x7d, (byte) 0x5d,
+      (byte) 0x2c, (byte) 0x22, (byte) 0x64, (byte) 0x6f,
+      (byte) 0x63, (byte) 0x22, (byte) 0x3a, (byte) 0x22,
+      (byte) 0x41, (byte) 0x20, (byte) 0x77, (byte) 0x65,
+      (byte) 0x61, (byte) 0x74, (byte) 0x68, (byte) 0x65,
+      (byte) 0x72, (byte) 0x20, (byte) 0x72, (byte) 0x65,
+      (byte) 0x61, (byte) 0x64, (byte) 0x69, (byte) 0x6e,
+      (byte) 0x67, (byte) 0x2e, (byte) 0x22, (byte) 0x7d,
+      (byte)  0x0, (byte) 0xb0, (byte) 0x81, (byte) 0xb3,
+      (byte) 0xc4, (byte)  0xa, (byte)  0xc, (byte) 0xf6,
+      (byte) 0x62, (byte) 0xfa, (byte) 0xc9, (byte) 0x38,
+      (byte) 0xfd, (byte) 0x7e, (byte) 0x52, (byte)  0x0,
+      (byte) 0xa7, (byte)  0xa, (byte) 0xcc, (byte)  0x1,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xa3, (byte) 0x90,
+      (byte) 0xe8, (byte) 0x87, (byte) 0x24, (byte)  0x0,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0x81, (byte) 0xfb,
+      (byte) 0xd6, (byte) 0x87, (byte) 0x24, (byte) 0x2c,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x31,
+      (byte) 0x39, (byte) 0x39, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xa5, (byte) 0xae,
+      (byte) 0xc2, (byte) 0x87, (byte) 0x24, (byte) 0x15,
+      (byte) 0x18, (byte) 0x30, (byte) 0x31, (byte) 0x32,
+      (byte) 0x36, (byte) 0x35, (byte) 0x30, (byte) 0x2d,
+      (byte) 0x39, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0xff, (byte) 0xb7, (byte) 0xa2,
+      (byte) 0x8b, (byte) 0x94, (byte) 0x26, (byte) 0xde,
+      (byte)  0x1, (byte) 0x18, (byte) 0x30, (byte) 0x31,
+      (byte) 0x32, (byte) 0x36, (byte) 0x35, (byte) 0x30,
+      (byte) 0x2d, (byte) 0x39, (byte) 0x39, (byte) 0x39,
+      (byte) 0x39, (byte) 0x39, (byte) 0xff, (byte) 0xdb,
+      (byte) 0xd5, (byte) 0xf6, (byte) 0x93, (byte) 0x26,
+      (byte) 0x9c, (byte)  0x1, (byte) 0xb0, (byte) 0x81,
+      (byte) 0xb3, (byte) 0xc4, (byte)  0xa, (byte)  0xc,
+      (byte) 0xf6, (byte) 0x62, (byte) 0xfa, (byte) 0xc9,
+      (byte) 0x38, (byte) 0xfd, (byte) 0x7e, (byte) 0x52,
+      (byte)  0x0, (byte) 0xa7,
+    };
+
+    return contents;
+  }
+}
+
+

+ 57 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFsck.java

@@ -23,6 +23,7 @@ import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertNotNull;
 import static org.junit.Assert.assertNull;
 import static org.junit.Assert.assertTrue;
+import static org.junit.Assert.fail;

 import java.io.BufferedReader;
 import java.io.ByteArrayOutputStream;
@@ -58,6 +59,7 @@ import org.apache.hadoop.hdfs.DFSClient;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
 import org.apache.hadoop.hdfs.DFSInputStream;
 import org.apache.hadoop.hdfs.DFSTestUtil;
+import org.apache.hadoop.hdfs.DFSUtil;
 import org.apache.hadoop.hdfs.DistributedFileSystem;
 import org.apache.hadoop.hdfs.HdfsConfiguration;
 import org.apache.hadoop.hdfs.MiniDFSCluster;
@@ -81,6 +83,8 @@ import org.apache.log4j.RollingFileAppender;
 import org.junit.Test;

 import com.google.common.collect.Sets;
+import org.mockito.Mockito;
+import static org.mockito.Mockito.*;

 /**
  * A JUnit test for doing fsck
@@ -876,6 +880,59 @@ public class TestFsck {
    }
  }

+  /** Test fsck with FileNotFound */
+  @Test
+  public void testFsckFileNotFound() throws Exception {
+
+    // Number of replicas to actually start
+    final short NUM_REPLICAS = 1;
+
+    Configuration conf = new Configuration();
+    NameNode namenode = mock(NameNode.class);
+    NetworkTopology nettop = mock(NetworkTopology.class);
+    Map<String,String[]> pmap = new HashMap<String, String[]>();
+    Writer result = new StringWriter();
+    PrintWriter out = new PrintWriter(result, true);
+    InetAddress remoteAddress = InetAddress.getLocalHost();
+    FSNamesystem fsName = mock(FSNamesystem.class);
+    when(namenode.getNamesystem()).thenReturn(fsName);
+    when(fsName.getBlockLocations(anyString(), anyLong(), anyLong(),
+        anyBoolean(), anyBoolean(), anyBoolean())).
+        thenThrow(new FileNotFoundException()) ;
+
+    NamenodeFsck fsck = new NamenodeFsck(conf, namenode, nettop, pmap, out,
+        NUM_REPLICAS, (short)1, remoteAddress);
+
+    String pathString = "/tmp/testFile";
+
+    long length = 123L;
+    boolean isDir = false;
+    int blockReplication = 1;
+    long blockSize = 128 *1024L;
+    long modTime = 123123123L;
+    long accessTime = 123123120L;
+    FsPermission perms = FsPermission.getDefault();
+    String owner = "foo";
+    String group = "bar";
+    byte [] symlink = null;
+    byte [] path = new byte[128];
+    path = DFSUtil.string2Bytes(pathString);
+    long fileId = 312321L;
+    int numChildren = 1;
+
+    HdfsFileStatus file = new HdfsFileStatus(length, isDir, blockReplication,
+        blockSize, modTime, accessTime, perms, owner, group, symlink, path,
+        fileId, numChildren);
+    Result res = new Result(conf);
+
+    try {
+      fsck.check(pathString, file, res);
+    } catch (Exception e) {
+      fail("Unexpected exception "+ e.getMessage());
+    }
+    assertTrue(res.toString().contains("HEALTHY"));
+  }
+
  /** Test fsck with symlinks in the filesystem */
  @Test
  public void testFsckSymlink() throws Exception {

+ 28 - 0
hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/ha/TestStandbyCheckpoints.java

@@ -239,6 +239,34 @@ public class TestStandbyCheckpoints {
    
    assertTrue(canceledOne);
  }
+
+  /**
+   * Test cancellation of ongoing checkpoints when failover happens
+   * mid-checkpoint during image upload from standby to active NN.
+   */
+  @Test(timeout=60000)
+  public void testCheckpointCancellationDuringUpload() throws Exception {
+    // don't compress, we want a big image
+    cluster.getConfiguration(0).setBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    cluster.getConfiguration(1).setBoolean(
+        DFSConfigKeys.DFS_IMAGE_COMPRESS_KEY, false);
+    // Throttle SBN upload to make it hang during upload to ANN
+    cluster.getConfiguration(1).setLong(
+        DFSConfigKeys.DFS_IMAGE_TRANSFER_RATE_KEY, 100);
+    cluster.restartNameNode(0);
+    cluster.restartNameNode(1);
+    nn0 = cluster.getNameNode(0);
+    nn1 = cluster.getNameNode(1);
+
+    cluster.transitionToActive(0);
+
+    doEdits(0, 100);
+    HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
+    HATestUtil.waitForCheckpoint(cluster, 1, ImmutableList.of(104));
+    cluster.transitionToStandby(0);
+    cluster.transitionToActive(1);
+  }
  
  /**
   * Make sure that clients will receive StandbyExceptions even when a

+ 3 - 0
hadoop-mapreduce-project/CHANGES.txt

@@ -220,6 +220,9 @@ Release 2.1.1-beta - UNRELEASED
    MAPREDUCE-5425. Junit in TestJobHistoryServer failing in jdk 7 (Robert
    Parker via jlowe)

+    MAPREDUCE-5454. TestDFSIO fails intermittently on JDK7 (Karthik Kambatla
+    via Sandy Ryza)
+
Release 2.1.0-beta - 2013-08-06

  INCOMPATIBLE CHANGES

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-common/pom.xml

@@ -64,6 +64,8 @@
              <goal>protoc</goal>
            </goals>
            <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                <param>${basedir}/../../../hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/proto</param>

+ 2 - 0
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/pom.xml

@@ -78,6 +78,8 @@
 				<goal>protoc</goal>
 				</goals>
 				<configuration>
+        <protocVersion>${protobuf.version}</protocVersion>
+        <protocCommand>${protoc.path}</protocCommand>
 				<imports>
 					<param>
 						${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto

+ 9 - 7
hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/fs/TestDFSIO.java

@@ -208,6 +208,9 @@ public class TestDFSIO implements Tool {
                                .build();
    FileSystem fs = cluster.getFileSystem();
    bench.createControlFile(fs, DEFAULT_NR_BYTES, DEFAULT_NR_FILES);
+
+    /** Check write here, as it is required for other tests */
+    testWrite();
  }

  @AfterClass
@@ -219,8 +222,7 @@ public class TestDFSIO implements Tool {
    cluster.shutdown();
  }

-  @Test
-  public void testWrite() throws Exception {
+  public static void testWrite() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
    bench.writeTest(fs);
@@ -228,7 +230,7 @@ public class TestDFSIO implements Tool {
    bench.analyzeResult(fs, TestType.TEST_TYPE_WRITE, execTime);
  }

-  @Test
+  @Test (timeout = 3000)
  public void testRead() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
@@ -237,7 +239,7 @@ public class TestDFSIO implements Tool {
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ, execTime);
  }

-  @Test
+  @Test (timeout = 3000)
  public void testReadRandom() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
@@ -247,7 +249,7 @@ public class TestDFSIO implements Tool {
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ_RANDOM, execTime);
  }

-  @Test
+  @Test (timeout = 3000)
  public void testReadBackward() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
@@ -257,7 +259,7 @@ public class TestDFSIO implements Tool {
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ_BACKWARD, execTime);
  }

-  @Test
+  @Test (timeout = 3000)
  public void testReadSkip() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();
@@ -267,7 +269,7 @@ public class TestDFSIO implements Tool {
    bench.analyzeResult(fs, TestType.TEST_TYPE_READ_SKIP, execTime);
  }

-  @Test
+  @Test (timeout = 3000)
  public void testAppend() throws Exception {
    FileSystem fs = cluster.getFileSystem();
    long tStart = System.currentTimeMillis();

+ 30 - 4
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/protoc/ProtocMojo.java

@@ -45,19 +45,45 @@ public class ProtocMojo extends AbstractMojo {
  @Parameter(required=true)
  private FileSet source;

-  @Parameter(defaultValue="protoc")
+  @Parameter
  private String protocCommand;

+  @Parameter(required=true)
+  private String protocVersion;

  public void execute() throws MojoExecutionException {
    try {
+      if (protocCommand == null || protocCommand.trim().isEmpty()) {
+        protocCommand = "protoc";
+      }
+      List<String> command = new ArrayList<String>();
+      command.add(protocCommand);
+      command.add("--version");
+      Exec exec = new Exec(this);
+      List<String> out = new ArrayList<String>();
+      if (exec.run(command, out) == 127) {
+        getLog().error("protoc, not found at: " + protocCommand);
+        throw new MojoExecutionException("protoc failure");        
+      } else {
+        if (out.isEmpty()) {
+          getLog().error("stdout: " + out);
+          throw new MojoExecutionException(
+              "'protoc --version' did not return a version");
+        } else {
+          if (!out.get(0).endsWith(protocVersion)) {
+            throw new MojoExecutionException(
+                "protoc version is '" + out.get(0) + "', expected version is '" 
+                    + protocVersion + "'");            
+          }
+        }
+      }
      if (!output.mkdirs()) {
        if (!output.exists()) {
          throw new MojoExecutionException("Could not create directory: " + 
            output);
        }
      }
-      List<String> command = new ArrayList<String>();
+      command = new ArrayList<String>();
      command.add(protocCommand);
      command.add("--java_out=" + output.getCanonicalPath());
      if (imports != null) {
@@ -68,8 +94,8 @@ public class ProtocMojo extends AbstractMojo {
      for (File f : FileSetUtils.convertFileSetToFiles(source)) {
        command.add(f.getCanonicalPath());
      }
-      Exec exec = new Exec(this);
-      List<String> out = new ArrayList<String>();
+      exec = new Exec(this);
+      out = new ArrayList<String>();
      if (exec.run(command, out) != 0) {
        getLog().error("protoc compiler error");
        for (String s : out) {

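Note on the ProtocMojo change above: the plugin now runs "protoc --version" first and fails the build unless the reported version ends with ${protobuf.version}. The sketch below shows the same check as a standalone program, assuming ProcessBuilder in place of the plugin's Exec helper; the class name and argument handling are illustrative, not part of the plugin.

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;

public class ProtocVersionCheck {
  public static void main(String[] args) throws Exception {
    // protocCommand plays the role of ${protoc.path} (defaulting to "protoc"),
    // expectedVersion the role of ${protobuf.version}; both defaults are
    // assumptions for this sketch.
    String protocCommand = args.length > 0 ? args[0] : "protoc";
    String expectedVersion = args.length > 1 ? args[1] : "2.5.0";

    Process p;
    try {
      p = new ProcessBuilder(protocCommand, "--version")
          .redirectErrorStream(true).start();
    } catch (IOException e) {
      throw new IOException("protoc not found at: " + protocCommand, e);
    }
    String firstLine;
    try (BufferedReader r = new BufferedReader(
        new InputStreamReader(p.getInputStream()))) {
      firstLine = r.readLine(); // e.g. "libprotoc 2.5.0"
    }
    p.waitFor();
    if (firstLine == null || !firstLine.endsWith(expectedVersion)) {
      throw new IOException("protoc version is '" + firstLine
          + "', expected version is '" + expectedVersion + "'");
    }
    System.out.println("protoc OK: " + firstLine);
  }
}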
+ 3 - 4
hadoop-maven-plugins/src/main/java/org/apache/hadoop/maven/plugin/util/Exec.java

@@ -63,11 +63,10 @@ public class Exec {
        for (String s : stdErr.getOutput()) {
          mojo.getLog().debug(s);
        }
-      } else {
-        stdOut.join();
-        stdErr.join();
-        output.addAll(stdOut.getOutput());
      }
+      stdOut.join();
+      stdErr.join();
+      output.addAll(stdOut.getOutput());
    } catch (Exception ex) {
      mojo.getLog().warn(command + " failed: " + ex.toString());
    }

+ 7 - 0
hadoop-project-dist/pom.xml

@@ -40,6 +40,7 @@

    <hadoop.component>UNDEF</hadoop.component>
    <bundle.snappy>false</bundle.snappy>
+    <bundle.snappy.in.bin>false</bundle.snappy.in.bin>
  </properties>
  
  <dependencies>
@@ -355,6 +356,12 @@
                        mkdir -p $${TARGET_BIN_DIR}
                        cd $${BIN_DIR}
                        $$TAR * | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
+                        if [ "${bundle.snappy.in.bin}" = "true" ] ; then
+                          if [ "${bundle.snappy}" = "true" ] ; then
+                            cd ${snappy.lib}
+                            $$TAR *snappy* | (cd $${TARGET_BIN_DIR}/; $$UNTAR)
+                          fi
+                        fi
                      fi
                    </echo>
                    <exec executable="sh" dir="${project.build.directory}" failonerror="true">

+ 23 - 3
hadoop-project/pom.xml

@@ -57,6 +57,12 @@
    <!-- Used for building path to native library loaded by tests.  Projects -->
    <!-- at different nesting levels in the source tree may need to override. -->
    <hadoop.common.build.dir>${basedir}/../../hadoop-common-project/hadoop-common/target</hadoop.common.build.dir>
+    <java.security.egd>file:///dev/urandom</java.security.egd>
+
+    <!-- ProtocolBuffer version, used to verify the protoc version and -->
+    <!-- define the protobuf JAR version                               -->
+    <protobuf.version>2.5.0</protobuf.version>
+    <protoc.path>${env.HADOOP_PROTOC_PATH}</protoc.path>
  </properties>

  <dependencyManagement>
@@ -288,6 +294,12 @@
        <version>${project.version}</version>
      </dependency>

+      <dependency>
+        <groupId>org.apache.hadoop</groupId>
+        <artifactId>hadoop-minikdc</artifactId>
+        <version>${project.version}</version>
+      </dependency>
+
      <dependency>
        <groupId>com.google.guava</groupId>
        <artifactId>guava</artifactId>
@@ -608,7 +620,7 @@
      <dependency>
        <groupId>com.google.protobuf</groupId>
        <artifactId>protobuf-java</artifactId>
-        <version>2.4.0a</version>
+        <version>${protobuf.version}</version>
      </dependency>
      <dependency>
        <groupId>commons-daemon</groupId>
@@ -831,7 +843,7 @@

            <java.net.preferIPv4Stack>true</java.net.preferIPv4Stack>
            <java.security.krb5.conf>${basedir}/src/test/resources/krb5.conf</java.security.krb5.conf>
-            <java.security.egd>file:///dev/urandom</java.security.egd>
+            <java.security.egd>${java.security.egd}</java.security.egd>
            <require.test.libhadoop>${require.test.libhadoop}</require.test.libhadoop>
          </systemPropertyVariables>
          <includes>
@@ -885,6 +897,14 @@
          <family>Windows</family>
        </os>
      </activation>
+      <properties>
+        <!-- We must use this exact string for egd on Windows, because the -->
+        <!-- JVM will check for an exact string match on this.  If found, it -->
+        <!-- will use a native entropy provider.  This will not really -->
+        <!-- attempt to open a file at this path. -->
+        <java.security.egd>file:/dev/urandom</java.security.egd>
+        <bundle.snappy.in.bin>true</bundle.snappy.in.bin>
+      </properties>
      <build>
        <plugins>
          <plugin>
@@ -893,7 +913,7 @@
            <configuration>
              <environmentVariables>
                <!-- Specify where to look for the native DLL on Windows -->
-                <PATH>${env.PATH};${hadoop.common.build.dir}/bin</PATH>
+                <PATH>${env.PATH};${hadoop.common.build.dir}/bin;${snappy.lib}</PATH>
              </environmentVariables>
            </configuration>
          </plugin>

+ 13 - 0
hadoop-yarn-project/CHANGES.txt

@@ -30,6 +30,9 @@ Release 2.3.0 - UNRELEASED
    YARN-758. Augment MockNM to use multiple cores (Karthik Kambatla via
    Sandy Ryza)

+    YARN-1060. Two tests in TestFairScheduler are missing @Test annotation
+    (Niranjan Singh via Sandy Ryza)
+
Release 2.1.1-beta - UNRELEASED

  INCOMPATIBLE CHANGES
@@ -65,6 +68,8 @@ Release 2.1.1-beta - UNRELEASED
    YARN-994. HeartBeat thread in AMRMClientAsync does not handle runtime
    exception correctly (Xuan Gong via bikas)

+    YARN-337. RM handles killed application tracking URL poorly (jlowe)
+
Release 2.1.0-beta - 2013-08-06

  INCOMPATIBLE CHANGES
@@ -514,6 +519,9 @@ Release 2.1.0-beta - 2013-08-06

    YARN-84. Use Builder to build RPC server. (Brandon Li via suresh)

+    YARN-1046. Disable mem monitoring by default in MiniYARNCluster. (Karthik
+    Kambatla via Sandy Ryza)
+
  OPTIMIZATIONS

    YARN-512. Log aggregation root directory check is more expensive than it
@@ -797,6 +805,9 @@ Release 2.1.0-beta - 2013-08-06
    YARN-945. Removed setting of AMRMToken's service from ResourceManager
    and changed client libraries do it all the time and correctly. (vinodkv)

+    YARN-656. In scheduler UI, including reserved memory in Memory Total can 
+    make it exceed cluster capacity. (Sandy Ryza)
+
  BREAKDOWN OF HADOOP-8562/YARN-191 SUBTASKS AND RELATED JIRAS

    YARN-158. Yarn creating package-info.java must not depend on sh.
@@ -1169,6 +1180,8 @@ Release 0.23.10 - UNRELEASED

  BUG FIXES

+    YARN-337. RM handles killed application tracking URL poorly (jlowe)
+
Release 0.23.9 - 2013-07-08

  INCOMPATIBLE CHANGES

+ 1 - 7
hadoop-yarn-project/hadoop-yarn/README

@@ -8,15 +8,9 @@ Maven: Maven 3

Setup
-----
-Install protobuf 2.4.0a or higher (Download from http://code.google.com/p/protobuf/downloads/list)
+Install protobuf 2.5.0 (Download from http://code.google.com/p/protobuf/downloads/list)
 - install the protoc executable (configure, make, make install)
 - install the maven artifact (cd java; mvn install)
-Installing protoc requires gcc 4.1.x or higher.
-If the make step fails with (Valid until a fix is released for protobuf 2.4.0a)
-    ./google/protobuf/descriptor.h:1152: error:
-    `google::protobuf::internal::Mutex*google::protobuf::DescriptorPool::mutex_'
-    is private
-  Replace descriptor.cc with http://protobuf.googlecode.com/svn-history/r380/trunk/src/google/protobuf/descriptor.cc


Quick Maven Tips

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml

@@ -45,6 +45,8 @@
              <goal>protoc</goal>
            </goals>
            <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                <param>${basedir}/../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                <param>${basedir}/src/main/proto</param>

+ 8 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/src/main/java/org/apache/hadoop/yarn/conf/YarnConfiguration.java

@@ -711,6 +711,14 @@ public class YarnConfiguration extends Configuration {
   */
  public static boolean DEFAULT_YARN_MINICLUSTER_FIXED_PORTS = false;

+  /**
+   * Whether users are explicitly trying to control resource monitoring
+   * configuration for the MiniYARNCluster. Disabled by default.
+   */
+  public static final String YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING =
+      YARN_PREFIX + "minicluster.control-resource-monitoring";
+  public static final boolean
+      DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING = false;

  /** The log directory for the containers */
  public static final String YARN_APP_CONTAINER_LOG_DIR =

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml

@@ -73,6 +73,8 @@
              <goal>protoc</goal>
            </goals>
            <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml

@@ -165,6 +165,8 @@
              <goal>protoc</goal>
            </goals>
            <configuration>
+              <protocVersion>${protobuf.version}</protocVersion>
+              <protocCommand>${protoc.path}</protocCommand>
              <imports>
                <param>${basedir}/../../../../hadoop-common-project/hadoop-common/src/main/proto</param>
                <param>${basedir}/../../hadoop-yarn-api/src/main/proto</param>

+ 4 - 6
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/RMAppAttemptImpl.java

@@ -865,6 +865,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
        break;
        case KILLED:
        {
+          // don't leave the tracking URL pointing to a non-existent AM
+          appAttempt.setTrackingUrlToRMAppPage();
          appEvent =
              new RMAppFailedAttemptEvent(applicationId,
                  RMAppEventType.ATTEMPT_KILLED,
@@ -873,6 +875,8 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
        break;
        case FAILED:
        {
+          // don't leave the tracking URL pointing to a non-existent AM
+          appAttempt.setTrackingUrlToRMAppPage();
          appEvent =
              new RMAppFailedAttemptEvent(applicationId,
                  RMAppEventType.ATTEMPT_FAILED,
@@ -1063,7 +1067,6 @@ public class RMAppAttemptImpl implements RMAppAttempt, Recoverable {
        RMAppAttemptEvent event) {
      appAttempt.diagnostics.append("ApplicationMaster for attempt " +
        appAttempt.getAppAttemptId() + " timed out");
-      appAttempt.setTrackingUrlToRMAppPage();
      super.transition(appAttempt, event);
    }
  }
@@ -1182,11 +1185,6 @@
            " due to: " +  containerStatus.getDiagnostics() + "." +
            "Failing this attempt.");

-        // When the AM dies, the trackingUrl is left pointing to the AM's URL,
-        // which shows up in the scheduler UI as a broken link.  Direct the
-        // user to the app page on the RM so they can see the status and logs.
-        appAttempt.setTrackingUrlToRMAppPage();
-
        new FinalTransition(RMAppAttemptState.FAILED).transition(
            appAttempt, containerFinishedEvent);
        return RMAppAttemptState.FAILED;

+ 1 - 1
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/dao/ClusterMetricsInfo.java

@@ -77,7 +77,7 @@ public class ClusterMetricsInfo {
    this.containersPending = metrics.getPendingContainers();
    this.containersReserved = metrics.getReservedContainers();
    
-    this.totalMB = availableMB + reservedMB + allocatedMB;
+    this.totalMB = availableMB + allocatedMB;
    this.activeNodes = clusterMetrics.getNumActiveNMs();
    this.lostNodes = clusterMetrics.getNumLostNMs();
    this.unhealthyNodes = clusterMetrics.getUnhealthyNMs();

+ 20 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/rmapp/attempt/TestRMAppAttemptTransitions.java

@@ -691,6 +691,26 @@ public class TestRMAppAttemptTransitions {
    assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl());
  }

+  @Test
+  public void testRunningToKilled() {
+    Container amContainer = allocateApplicationAttempt();
+    launchApplicationAttempt(amContainer);
+    runApplicationAttempt(amContainer, "host", 8042, "oldtrackingurl");
+    applicationAttempt.handle(
+        new RMAppAttemptEvent(
+            applicationAttempt.getAppAttemptId(),
+            RMAppAttemptEventType.KILL));
+    assertEquals(RMAppAttemptState.KILLED,
+        applicationAttempt.getAppAttemptState());
+    assertEquals(0,applicationAttempt.getJustFinishedContainers().size());
+    assertEquals(amContainer, applicationAttempt.getMasterContainer());
+    assertEquals(0, applicationAttempt.getRanNodes().size());
+    String rmAppPageUrl = pjoin(RM_WEBAPP_ADDR, "cluster", "app",
+        applicationAttempt.getAppAttemptId().getApplicationId());
+    assertEquals(rmAppPageUrl, applicationAttempt.getOriginalTrackingUrl());
+    assertEquals(rmAppPageUrl, applicationAttempt.getTrackingUrl());
+  }
+
  @Test(timeout=10000)
  public void testLaunchedExpire() {
    Container amContainer = allocateApplicationAttempt();

+ 2 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/scheduler/fair/TestFairScheduler.java

@@ -1991,6 +1991,7 @@ public class TestFairScheduler {
    assertEquals(0, app.getReservedContainers().size());
  }
  
+  @Test
  public void testNoMoreCpuOnNode() {
    RMNode node1 = MockNodes.newNodeInfo(1, Resources.createResource(2048, 1),
        1, "127.0.0.1");
@@ -2009,6 +2010,7 @@ public class TestFairScheduler {
    assertEquals(1, app.getLiveContainers().size());
  }

+  @Test
  public void testBasicDRFAssignment() throws Exception {
    RMNode node = MockNodes.newNodeInfo(1, BuilderUtils.newResource(8192, 5));
    NodeAddedSchedulerEvent nodeEvent = new NodeAddedSchedulerEvent(node);

+ 1 - 2
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServices.java

@@ -408,8 +408,7 @@ public class TestRMWebServices extends JerseyTest {
    ClusterMetrics clusterMetrics = ClusterMetrics.getMetrics();

    long totalMBExpect = 
-        metrics.getReservedMB()+ metrics.getAvailableMB() 
-        + metrics.getAllocatedMB();
+        metrics.getAvailableMB() + metrics.getAllocatedMB();

    assertEquals("appsSubmitted doesn't match", 
        metrics.getAppsSubmitted(), submittedApps);

+ 10 - 0
hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/MiniYARNCluster.java

@@ -304,6 +304,16 @@ public class MiniYARNCluster extends CompositeService {
                        MiniYARNCluster.getHostname() + ":0");
        getConfig().set(YarnConfiguration.NM_WEBAPP_ADDRESS,
                        MiniYARNCluster.getHostname() + ":0");
+
+        // Disable resource checks by default
+        if (!getConfig().getBoolean(
+            YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING,
+            YarnConfiguration.
+                DEFAULT_YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING)) {
+          getConfig().setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, false);
+          getConfig().setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, false);
+        }
+
        LOG.info("Starting NM: " + index);
        nodeManagers[index].init(getConfig());
        new Thread() {
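
Note on the YarnConfiguration and MiniYARNCluster hunks above: once applied, the mini cluster force-disables the NodeManager pmem/vmem checks unless the caller declares that it controls resource monitoring. A hypothetical test helper that opts in and keeps the checks enabled could look like the sketch below; the class name is illustrative, and it assumes the new key from this patch plus the existing NM_PMEM_CHECK_ENABLED / NM_VMEM_CHECK_ENABLED keys are on the classpath via hadoop-yarn-api.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.yarn.conf.YarnConfiguration;

public class MiniClusterMonitoringConf {
  // Build a configuration telling MiniYARNCluster that the caller controls
  // resource monitoring, so the pmem/vmem checks are left as configured here.
  public static Configuration withResourceChecksEnabled() {
    Configuration conf = new YarnConfiguration();
    conf.setBoolean(
        YarnConfiguration.YARN_MINICLUSTER_CONTROL_RESOURCE_MONITORING, true);
    conf.setBoolean(YarnConfiguration.NM_PMEM_CHECK_ENABLED, true);
    conf.setBoolean(YarnConfiguration.NM_VMEM_CHECK_ENABLED, true);
    return conf;
  }
}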