Browse Source

Copied branch-0.23.2 over as branch-0.23

git-svn-id: https://svn.apache.org/repos/asf/hadoop/common/branches/branch-0.23@1306727 13f79535-47bb-0310-9956-ffa450edef68
Arun Murthy 13 years ago
Parent
Commit
86a92c9f85
100 changed files with 1019 additions and 7661 deletions
  1. +1 -1  BUILDING.txt
  2. +2 -2  hadoop-assemblies/pom.xml
  3. +4 -8  hadoop-client/pom.xml
  4. +2 -2  hadoop-common-project/hadoop-annotations/pom.xml
  5. +2 -2  hadoop-common-project/hadoop-auth-examples/pom.xml
  6. +2 -2  hadoop-common-project/hadoop-auth/pom.xml
  7. +6 -10  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java
  8. +2 -2  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java
  9. +6 -3  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java
  10. +0 -70  hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java
  11. +2 -4  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java
  12. +26 -26  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java
  13. +4 -9  hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
  14. +4 -249  hadoop-common-project/hadoop-common/CHANGES.txt
  15. +0 -16  hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml
  16. +8 -34  hadoop-common-project/hadoop-common/pom.xml
  17. +40 -53  hadoop-common-project/hadoop-common/src/main/conf/log4j.properties
  18. +4 -7  hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/HttpAuthentication.xml
  19. +2 -0  hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml
  20. +0 -6  hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml
  21. +35 -65  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java
  22. +1 -26  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java
  23. +0 -9  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java
  24. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java
  25. +11 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java
  26. +0 -917  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java
  27. +0 -203  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java
  28. +0 -65  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java
  29. +0 -296  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java
  30. +0 -129  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java
  31. +0 -59  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java
  32. +0 -56  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceStatus.java
  33. +0 -78  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java
  34. +0 -40  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthCheckFailedException.java
  35. +0 -293  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java
  36. +0 -194  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java
  37. +0 -42  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ServiceFailedException.java
  38. +0 -188  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java
  39. +0 -314  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java
  40. +0 -90  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java
  41. +0 -387  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java
  42. +0 -155  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java
  43. +0 -39  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolPB.java
  44. +0 -152  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java
  45. +0 -4  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java
  46. +0 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java
  47. +7 -15  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/DefaultFailoverProxyProvider.java
  48. +4 -6  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java
  49. +23 -98  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java
  50. +31 -73  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java
  51. +5 -33  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java
  52. +238 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroRpcEngine.java
  53. +15 -11  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroSpecificRpcEngine.java
  54. +60 -140  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java
  55. +121 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ConnectionHeader.java
  56. +0 -49  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java
  57. +0 -437  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java
  58. +0 -39  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java
  59. +0 -34  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoPB.java
  60. +0 -122  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java
  61. +0 -42  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInterface.java
  62. +10 -2  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java
  63. +4 -17  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java
  64. +0 -35  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolTranslator.java
  65. +28 -342  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java
  66. +0 -193  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java
  67. +3 -13  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java
  68. +0 -36  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcInvocationHandler.java
  69. +0 -118  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java
  70. +5 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java
  71. +102 -293  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java
  72. +1 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/StandbyException.java
  73. +1 -0  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java
  74. +121 -253  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java
  75. +1 -10  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java
  76. +27 -64  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java
  77. +0 -7  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java
  78. +0 -23  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java
  79. +0 -147  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java
  80. +1 -40  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java
  81. +0 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java
  82. +5 -3  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java
  83. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java
  84. +0 -321  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java
  85. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java
  86. +11 -75  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java
  87. +15 -43  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java
  88. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java
  89. +0 -12  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java
  90. +3 -21  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java
  91. +3 -12  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java
  92. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetGroupsBase.java
  93. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java
  94. +1 -1  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java
  95. +0 -72  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java
  96. +0 -49  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java
  97. +0 -30  hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java
  98. +0 -2  hadoop-common-project/hadoop-common/src/main/native/configure.ac
  99. +1 -1  hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh
  100. +2 -4  hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh

+ 1 - 1
BUILDING.txt

@@ -8,7 +8,7 @@ Requirements:
 * Maven 3.0
 * Forrest 0.8 (if generating docs)
 * Findbugs 1.3.9 (if running findbugs)
-* ProtocolBuffer 2.4.1+ (for MapReduce and HDFS)
+* ProtocolBuffer 2.4.1+ (for MapReduce)
 * Autotools (if compiling native code)
 * Internet connection for first build (to fetch all Maven and Hadoop dependencies)
 

+ 2 - 2
hadoop-assemblies/pom.xml

@@ -20,12 +20,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.23.3-SNAPSHOT</version>
+    <version>0.23.2-SNAPSHOT</version>
     <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-assemblies</artifactId>
-  <version>0.23.3-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
   <name>Apache Hadoop Assemblies</name>
   <description>Apache Hadoop Assemblies</description>
 

+ 4 - 8
hadoop-client/pom.xml

@@ -17,22 +17,18 @@
   <modelVersion>4.0.0</modelVersion>
   <parent>
     <groupId>org.apache.hadoop</groupId>
-    <artifactId>hadoop-project-dist</artifactId>
-    <version>0.23.3-SNAPSHOT</version>
-    <relativePath>../hadoop-project-dist</relativePath>
+    <artifactId>hadoop-project</artifactId>
+    <version>0.23.2-SNAPSHOT</version>
+    <relativePath>../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-client</artifactId>
-  <version>0.23.3-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <description>Apache Hadoop Client</description>
   <name>Apache Hadoop Client</name>
 
-<properties>
-   <hadoop.component>client</hadoop.component>
- </properties>
-
   <dependencies>
     <dependency>
       <groupId>org.apache.hadoop</groupId>

+ 2 - 2
hadoop-common-project/hadoop-annotations/pom.xml

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.23.3-SNAPSHOT</version>
+    <version>0.23.2-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-annotations</artifactId>
-  <version>0.23.3-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
   <description>Apache Hadoop Annotations</description>
   <name>Apache Hadoop Annotations</name>
   <packaging>jar</packaging>

+ 2 - 2
hadoop-common-project/hadoop-auth-examples/pom.xml

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.23.3-SNAPSHOT</version>
+    <version>0.23.2-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth-examples</artifactId>
-  <version>0.23.3-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
   <packaging>war</packaging>
 
   <name>Apache Hadoop Auth Examples</name>

+ 2 - 2
hadoop-common-project/hadoop-auth/pom.xml

@@ -17,12 +17,12 @@
   <parent>
     <groupId>org.apache.hadoop</groupId>
     <artifactId>hadoop-project</artifactId>
-    <version>0.23.3-SNAPSHOT</version>
+    <version>0.23.2-SNAPSHOT</version>
     <relativePath>../../hadoop-project</relativePath>
   </parent>
   <groupId>org.apache.hadoop</groupId>
   <artifactId>hadoop-auth</artifactId>
-  <version>0.23.3-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
   <packaging>jar</packaging>
 
   <name>Apache Hadoop Auth</name>

+ 6 - 10
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/client/KerberosAuthenticator.java

@@ -13,12 +13,12 @@
  */
 package org.apache.hadoop.security.authentication.client;
 
+import com.sun.security.auth.module.Krb5LoginModule;
 import org.apache.commons.codec.binary.Base64;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.GSSName;
-import org.ietf.jgss.Oid;
+import sun.security.jgss.GSSUtil;
 
 import javax.security.auth.Subject;
 import javax.security.auth.login.AppConfigurationEntry;
@@ -26,7 +26,6 @@ import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
 import javax.security.auth.login.LoginException;
 import java.io.IOException;
-import java.lang.reflect.Field;
 import java.net.HttpURLConnection;
 import java.net.URL;
 import java.security.AccessControlContext;
@@ -98,7 +97,7 @@ public class KerberosAuthenticator implements Authenticator {
     }
 
     private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
-      new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+      new AppConfigurationEntry(Krb5LoginModule.class.getName(),
                                 AppConfigurationEntry.LoginModuleControlFlag.OPTIONAL,
                                 USER_KERBEROS_OPTIONS);
 
@@ -110,7 +109,7 @@ public class KerberosAuthenticator implements Authenticator {
       return USER_KERBEROS_CONF;
     }
   }
-  
+
   private URL url;
   private HttpURLConnection conn;
   private Base64 base64;
@@ -196,12 +195,9 @@ public class KerberosAuthenticator implements Authenticator {
           try {
             GSSManager gssManager = GSSManager.getInstance();
             String servicePrincipal = "HTTP/" + KerberosAuthenticator.this.url.getHost();
-            
             GSSName serviceName = gssManager.createName(servicePrincipal,
-                                                        GSSName.NT_HOSTBASED_SERVICE);
-            Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal, 
-                gssManager);
-            gssContext = gssManager.createContext(serviceName, oid, null,
+                                                        GSSUtil.NT_GSS_KRB5_PRINCIPAL);
+            gssContext = gssManager.createContext(serviceName, GSSUtil.GSS_KRB5_MECH_OID, null,
                                                   GSSContext.DEFAULT_LIFETIME);
             gssContext.requestCredDeleg(true);
             gssContext.requestMutualAuth(true);
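
For context on the hunks above: the reflection-based lookups that went through the (now-deleted) KerberosUtil are replaced by direct references to Sun's internal sun.security.jgss.GSSUtil constants, and the service name type switches from GSSName.NT_HOSTBASED_SERVICE to GSSUtil.NT_GSS_KRB5_PRINCIPAL. A minimal standalone sketch of the resulting SPNEGO context initiation, assuming the caller already holds Kerberos credentials and runs on a Sun/Oracle JDK (the class and host names here are illustrative, not from the patch):

import org.ietf.jgss.GSSContext;
import org.ietf.jgss.GSSException;
import org.ietf.jgss.GSSManager;
import org.ietf.jgss.GSSName;
import sun.security.jgss.GSSUtil;

public class SpnegoInitSketch {
  /** Produce the first token of an HTTP SPNEGO handshake. */
  public static byte[] firstToken(String host) throws GSSException {
    GSSManager gssManager = GSSManager.getInstance();
    // Service principal of the target web endpoint, e.g. "HTTP/host.example.com".
    GSSName serviceName = gssManager.createName("HTTP/" + host,
        GSSUtil.NT_GSS_KRB5_PRINCIPAL);
    GSSContext gssContext = gssManager.createContext(serviceName,
        GSSUtil.GSS_KRB5_MECH_OID, null, GSSContext.DEFAULT_LIFETIME);
    gssContext.requestCredDeleg(true);   // allow the server to act on the client's behalf
    gssContext.requestMutualAuth(true);  // require the server to authenticate back
    // This token would be base64-encoded into an "Authorization: Negotiate ..." header.
    return gssContext.initSecContext(new byte[0], 0, 0);
  }
}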

+ 2 - 2
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/server/KerberosAuthenticationHandler.java

@@ -15,9 +15,9 @@ package org.apache.hadoop.security.authentication.server;
 
 import org.apache.hadoop.security.authentication.client.AuthenticationException;
 import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
+import com.sun.security.auth.module.Krb5LoginModule;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSCredential;
 import org.ietf.jgss.GSSManager;
@@ -95,7 +95,7 @@ public class KerberosAuthenticationHandler implements AuthenticationHandler {
       }
 
       return new AppConfigurationEntry[]{
-          new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+        new AppConfigurationEntry(Krb5LoginModule.class.getName(),
                                   AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
                                   options),};
     }
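
The handler builds its JAAS configuration programmatically instead of reading a jaas.conf file. A hedged sketch of that pattern in isolation, with placeholder keytab and principal values (the real handler derives its options from filter init parameters):

import java.util.HashMap;
import java.util.Map;
import javax.security.auth.login.AppConfigurationEntry;
import javax.security.auth.login.Configuration;
import com.sun.security.auth.module.Krb5LoginModule;

public class ServerJaasConfigSketch extends Configuration {
  @Override
  public AppConfigurationEntry[] getAppConfigurationEntry(String name) {
    Map<String, String> options = new HashMap<String, String>();
    options.put("useKeyTab", "true");                              // log in from a keytab
    options.put("storeKey", "true");                               // retain the key for accepting GSS contexts
    options.put("keyTab", "/etc/security/http.keytab");            // placeholder path
    options.put("principal", "HTTP/host.example.com@EXAMPLE.COM"); // placeholder principal
    return new AppConfigurationEntry[]{
        new AppConfigurationEntry(Krb5LoginModule.class.getName(),
            AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
            options)};
  }
}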

+ 6 - 3
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosName.java

@@ -23,11 +23,12 @@ import java.util.ArrayList;
 import java.util.List;
 import java.util.regex.Matcher;
 import java.util.regex.Pattern;
-import java.lang.reflect.Method;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 
+import sun.security.krb5.Config;
+import sun.security.krb5.KrbException;
 
 /**
  * This class implements parsing and handling of Kerberos principal names. In
@@ -76,11 +77,13 @@ public class KerberosName {
   private static List<Rule> rules;
 
   private static String defaultRealm;
+  private static Config kerbConf;
 
   static {
     try {
-      defaultRealm = KerberosUtil.getDefaultRealm();
-    } catch (Exception ke) {
+      kerbConf = Config.getInstance();
+      defaultRealm = kerbConf.getDefaultRealm();
+    } catch (KrbException ke) {
         defaultRealm="";
     }
   }

+ 0 - 70
hadoop-common-project/hadoop-auth/src/main/java/org/apache/hadoop/security/authentication/util/KerberosUtil.java

@@ -1,70 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security.authentication.util;
-
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-
-import org.ietf.jgss.GSSException;
-import org.ietf.jgss.GSSManager;
-import org.ietf.jgss.Oid;
-
-public class KerberosUtil {
-
-  /* Return the Kerberos login module name */
-  public static String getKrb5LoginModuleName() {
-    return System.getProperty("java.vendor").contains("IBM")
-      ? "com.ibm.security.auth.module.Krb5LoginModule"
-      : "com.sun.security.auth.module.Krb5LoginModule";
-  }
-  
-  public static Oid getOidClassInstance(String servicePrincipal,
-      GSSManager gssManager) 
-      throws ClassNotFoundException, GSSException, NoSuchFieldException,
-      IllegalAccessException {
-    Class<?> oidClass;
-    if (System.getProperty("java.vendor").contains("IBM")) {
-      oidClass = Class.forName("com.ibm.security.jgss.GSSUtil");
-    } else {
-      oidClass = Class.forName("sun.security.jgss.GSSUtil");
-    }
-    Field oidField = oidClass.getDeclaredField("GSS_KRB5_MECH_OID");
-    return (Oid)oidField.get(oidClass);
-  }
-
-  public static String getDefaultRealm() 
-      throws ClassNotFoundException, NoSuchMethodException, 
-      IllegalArgumentException, IllegalAccessException, 
-      InvocationTargetException {
-    Object kerbConf;
-    Class<?> classRef;
-    Method getInstanceMethod;
-    Method getDefaultRealmMethod;
-    if (System.getProperty("java.vendor").contains("IBM")) {
-      classRef = Class.forName("com.ibm.security.krb5.internal.Config");
-    } else {
-      classRef = Class.forName("sun.security.krb5.Config");
-    }
-    getInstanceMethod = classRef.getMethod("getInstance", new Class[0]);
-    kerbConf = getInstanceMethod.invoke(classRef, new Object[0]);
-    getDefaultRealmMethod = classRef.getDeclaredMethod("getDefaultRealm",
-         new Class[0]);
-    return (String)getDefaultRealmMethod.invoke(kerbConf, new Object[0]);
-  }
-}
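
For reference, the deleted utility existed so hadoop-auth could build and run on non-Sun JDKs (HADOOP-6941, also dropped from CHANGES.txt below): it keyed off the java.vendor system property and reached the vendor's Kerberos internals through reflection, avoiding any compile-time sun.* or com.ibm.* dependency. A condensed sketch of that pattern, mirroring the deleted methods above:

import java.lang.reflect.Method;

public class VendorNeutralKrb5Sketch {
  private static boolean isIbmJdk() {
    return System.getProperty("java.vendor").contains("IBM");
  }

  /** JAAS Kerberos login module name, chosen per JDK vendor. */
  public static String krb5LoginModuleName() {
    return isIbmJdk()
        ? "com.ibm.security.auth.module.Krb5LoginModule"
        : "com.sun.security.auth.module.Krb5LoginModule";
  }

  /** Default realm via reflection, so no vendor class is referenced at compile time. */
  public static String defaultRealm() throws Exception {
    Class<?> configClass = Class.forName(isIbmJdk()
        ? "com.ibm.security.krb5.internal.Config"
        : "sun.security.krb5.Config");
    Object config = configClass.getMethod("getInstance").invoke(null); // static factory
    Method getDefaultRealm = configClass.getDeclaredMethod("getDefaultRealm");
    return (String) getDefaultRealm.invoke(config);
  }
}

Copying branch-0.23.2 over branch-0.23 removes this indirection, which is why the callers above now import com.sun.security and sun.security classes directly.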

+ 2 - 4
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/KerberosTestUtils.java

@@ -13,15 +13,13 @@
  */
 package org.apache.hadoop.security.authentication;
 
+import com.sun.security.auth.module.Krb5LoginModule;
 
 import javax.security.auth.Subject;
 import javax.security.auth.kerberos.KerberosPrincipal;
 import javax.security.auth.login.AppConfigurationEntry;
 import javax.security.auth.login.Configuration;
 import javax.security.auth.login.LoginContext;
-
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
-
 import java.io.File;
 import java.security.Principal;
 import java.security.PrivilegedActionException;
@@ -90,7 +88,7 @@ public class KerberosTestUtils {
       options.put("debug", "true");
 
       return new AppConfigurationEntry[]{
-        new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+        new AppConfigurationEntry(Krb5LoginModule.class.getName(),
                                   AppConfigurationEntry.LoginModuleControlFlag.REQUIRED,
                                   options),};
     }

+ 26 - 26
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAuthenticationFilter.java

@@ -41,14 +41,14 @@ public class TestAuthenticationFilter extends TestCase {
     FilterConfig config = Mockito.mock(FilterConfig.class);
     Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("");
     Mockito.when(config.getInitParameter("a")).thenReturn("A");
-    Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("a")).elements());
+    Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("a")).elements());
     Properties props = filter.getConfiguration("", config);
     assertEquals("A", props.getProperty("a"));
 
     config = Mockito.mock(FilterConfig.class);
     Mockito.when(config.getInitParameter(AuthenticationFilter.CONFIG_PREFIX)).thenReturn("foo");
     Mockito.when(config.getInitParameter("foo.a")).thenReturn("A");
-    Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>(Arrays.asList("foo.a")).elements());
+    Mockito.when(config.getInitParameterNames()).thenReturn(new Vector(Arrays.asList("foo.a")).elements());
     props = filter.getConfiguration("foo.", config);
     assertEquals("A", props.getProperty("a"));
   }
@@ -57,7 +57,7 @@ public class TestAuthenticationFilter extends TestCase {
     AuthenticationFilter filter = new AuthenticationFilter();
     try {
       FilterConfig config = Mockito.mock(FilterConfig.class);
-      Mockito.when(config.getInitParameterNames()).thenReturn(new Vector<String>().elements());
+      Mockito.when(config.getInitParameterNames()).thenReturn(new Vector().elements());
       filter.init(config);
       fail();
     } catch (ServletException ex) {
@@ -119,7 +119,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.AUTH_TOKEN_VALIDITY)).elements());
       filter.init(config);
       assertEquals(PseudoAuthenticationHandler.class, filter.getAuthenticationHandler().getClass());
@@ -138,7 +138,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("simple");
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
       filter.init(config);
       assertFalse(filter.isRandomSecret());
@@ -154,7 +154,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
       Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.COOKIE_DOMAIN,
                                 AuthenticationFilter.COOKIE_PATH)).elements());
       filter.init(config);
@@ -173,7 +173,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
       assertTrue(DummyAuthenticationHandler.init);
     } finally {
@@ -187,7 +187,7 @@ public class TestAuthenticationFilter extends TestCase {
       FilterConfig config = Mockito.mock(FilterConfig.class);
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn("kerberos");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
     } catch (ServletException ex) {
       // Expected
@@ -204,7 +204,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -225,7 +225,7 @@ public class TestAuthenticationFilter extends TestCase {
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
       filter.init(config);
 
@@ -254,7 +254,7 @@ public class TestAuthenticationFilter extends TestCase {
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
       filter.init(config);
 
@@ -288,7 +288,7 @@ public class TestAuthenticationFilter extends TestCase {
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
       filter.init(config);
 
@@ -321,7 +321,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -332,7 +332,7 @@ public class TestAuthenticationFilter extends TestCase {
       FilterChain chain = Mockito.mock(FilterChain.class);
 
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             fail();
@@ -358,7 +358,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TOKEN_VALIDITY)).thenReturn("1000");
       Mockito.when(config.getInitParameter(AuthenticationFilter.SIGNATURE_SECRET)).thenReturn("secret");
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                 AuthenticationFilter.AUTH_TOKEN_VALIDITY,
                                 AuthenticationFilter.SIGNATURE_SECRET)).elements());
 
@@ -366,7 +366,7 @@ public class TestAuthenticationFilter extends TestCase {
         Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_DOMAIN)).thenReturn(".foo.com");
         Mockito.when(config.getInitParameter(AuthenticationFilter.COOKIE_PATH)).thenReturn("/bar");
         Mockito.when(config.getInitParameterNames()).thenReturn(
-          new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
+          new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE,
                                   AuthenticationFilter.AUTH_TOKEN_VALIDITY,
                                   AuthenticationFilter.SIGNATURE_SECRET,
                                   AuthenticationFilter.COOKIE_DOMAIN,
@@ -387,7 +387,7 @@ public class TestAuthenticationFilter extends TestCase {
       final boolean[] calledDoFilter = new boolean[1];
 
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             calledDoFilter[0] = true;
@@ -398,7 +398,7 @@ public class TestAuthenticationFilter extends TestCase {
 
       final Cookie[] setCookie = new Cookie[1];
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             Object[] args = invocation.getArguments();
@@ -451,7 +451,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -470,7 +470,7 @@ public class TestAuthenticationFilter extends TestCase {
       FilterChain chain = Mockito.mock(FilterChain.class);
 
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             Object[] args = invocation.getArguments();
@@ -496,7 +496,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -515,7 +515,7 @@ public class TestAuthenticationFilter extends TestCase {
       FilterChain chain = Mockito.mock(FilterChain.class);
 
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             fail();
@@ -526,7 +526,7 @@ public class TestAuthenticationFilter extends TestCase {
 
       final Cookie[] setCookie = new Cookie[1];
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             Object[] args = invocation.getArguments();
@@ -556,7 +556,7 @@ public class TestAuthenticationFilter extends TestCase {
       Mockito.when(config.getInitParameter(AuthenticationFilter.AUTH_TYPE)).thenReturn(
         DummyAuthenticationHandler.class.getName());
       Mockito.when(config.getInitParameterNames()).thenReturn(
-        new Vector<String>(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
+        new Vector(Arrays.asList(AuthenticationFilter.AUTH_TYPE)).elements());
       filter.init(config);
 
       HttpServletRequest request = Mockito.mock(HttpServletRequest.class);
@@ -575,7 +575,7 @@ public class TestAuthenticationFilter extends TestCase {
       FilterChain chain = Mockito.mock(FilterChain.class);
 
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             fail();
@@ -586,7 +586,7 @@ public class TestAuthenticationFilter extends TestCase {
 
       final Cookie[] setCookie = new Cookie[1];
       Mockito.doAnswer(
-        new Answer<Object>() {
+        new Answer() {
           @Override
           public Object answer(InvocationOnMock invocation) throws Throwable {
             Object[] args = invocation.getArguments();
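
These test hunks revert Vector<String> and Answer<Object> to their raw types, reintroducing the unchecked-conversion warnings that HADOOP-8119 (also removed from CHANGES.txt below) had fixed. The stubbing idiom itself is unchanged; a minimal sketch of it in isolation, assuming Mockito and the Servlet API are on the classpath (class and parameter names are illustrative):

import java.util.Arrays;
import java.util.Enumeration;
import java.util.Vector;
import javax.servlet.FilterConfig;
import org.mockito.Mockito;

public class InitParamStubSketch {
  /** Mock a FilterConfig exposing a single init parameter "a" = "A". */
  static FilterConfig stubConfig() {
    FilterConfig config = Mockito.mock(FilterConfig.class);
    Mockito.when(config.getInitParameter("a")).thenReturn("A");
    // getInitParameterNames() must return an Enumeration; Vector.elements() supplies one.
    Enumeration<String> names = new Vector<String>(Arrays.asList("a")).elements();
    Mockito.when(config.getInitParameterNames()).thenReturn(names);
    return config;
  }
}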

+ 4 - 9
hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java

@@ -19,16 +19,14 @@ import org.apache.hadoop.security.authentication.client.KerberosAuthenticator;
 import junit.framework.TestCase;
 import org.apache.commons.codec.binary.Base64;
 import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.ietf.jgss.GSSContext;
 import org.ietf.jgss.GSSManager;
 import org.ietf.jgss.GSSName;
 import org.mockito.Mockito;
-import org.ietf.jgss.Oid;
+import sun.security.jgss.GSSUtil;
 
 import javax.servlet.http.HttpServletRequest;
 import javax.servlet.http.HttpServletResponse;
-import java.lang.reflect.Field;
 import java.util.Properties;
 import java.util.concurrent.Callable;
 
@@ -145,12 +143,9 @@ public class TestKerberosAuthenticationHandler extends TestCase {
         GSSContext gssContext = null;
         try {
           String servicePrincipal = KerberosTestUtils.getServerPrincipal();
-          GSSName serviceName = gssManager.createName(servicePrincipal,
-              GSSName.NT_HOSTBASED_SERVICE);
-          Oid oid = KerberosUtil.getOidClassInstance(servicePrincipal, 
-              gssManager);
-          gssContext = gssManager.createContext(serviceName, oid, null,
-                                                  GSSContext.DEFAULT_LIFETIME);
+          GSSName serviceName = gssManager.createName(servicePrincipal, GSSUtil.NT_GSS_KRB5_PRINCIPAL);
+          gssContext = gssManager.createContext(serviceName, GSSUtil.GSS_KRB5_MECH_OID, null,
+                                                GSSContext.DEFAULT_LIFETIME);
           gssContext.requestCredDeleg(true);
           gssContext.requestMutualAuth(true);
 

+ 4 - 249
hadoop-common-project/hadoop-common/CHANGES.txt

@@ -1,245 +1,6 @@
 Hadoop Change Log
 
-Release 0.23.3 - UNRELEASED
-
-  INCOMPATIBLE CHANGES
-
-    HADOOP-7920. Remove Avro Rpc. (suresh)
-
-  NEW FEATURES
-
-    HADOOP-7773. Add support for protocol buffer based RPC engine.
-    (suresh)
-
-    HADOOP-7875. Add helper class to unwrap protobuf ServiceException.
-    (suresh)
-
-    HADOOP-7454. Common side of High Availability Framework (HDFS-1623)
-    Contributed by Todd Lipcon, Aaron T. Myers, Eli Collins, Uma Maheswara Rao G,
-    Bikas Saha, Suresh Srinivas, Jitendra Nath Pandey, Hari Mankude, Brandon Li,
-    Sanjay Radia, Mingjie Lai, and Gregory Chanan
-
-    HADOOP-8121. Active Directory Group Mapping Service. (Jonathan Natkins via
-    atm)
-
-    HADOOP-7030. Add TableMapping topology implementation to read host to rack
-    mapping from a file. (Patrick Angeles and tomwhite via tomwhite)
-
-    HADOOP-8206. Common portion of a ZK-based failover controller (todd)
-
-  IMPROVEMENTS
-
-    HADOOP-7524. Change RPC to allow multiple protocols including multuple
-    versions of the same protocol (sanjay Radia)
-
-    HADOOP-7607. Simplify the RPC proxy cleanup process. (atm)
-
-    HADOOP-7687. Make getProtocolSignature public  (sanjay)
-
-    HADOOP-7693. Enhance AvroRpcEngine to support the new #addProtocol
-    interface introduced in HADOOP-7524.  (cutting)
-
-    HADOOP-7716. RPC protocol registration on SS does not log the protocol name
-    (only the class which may be different) (sanjay)
-
-    HADOOP-7776. Make the Ipc-Header in a RPC-Payload an explicit header.
-    (sanjay)
-
-    HADOOP-7717. Move handling of concurrent client fail-overs to
-    RetryInvocationHandler (atm)
-
-    HADOOP-7862. Move the support for multiple protocols to lower layer so
-    that Writable, PB and Avro can all use it (Sanjay)
-
-    HADOOP-7876. Provided access to encoded key in DelegationKey for
-    use in protobuf based RPCs. (suresh)
-
-    HADOOP-7899. Generate proto java files as part of the build. (tucu)
-
-    HADOOP-7957. Classes deriving GetGroupsBase should be able to override 
-    proxy creation. (jitendra)
-
-    HADOOP-7965. Support for protocol version and signature in PB. (jitendra)
-
-    HADOOP-8070. Add a standalone benchmark for RPC call performance. (todd)
-
-    HADOOP-8084. Updates ProtoBufRpc engine to not do an unnecessary copy 
-    for RPC request/response. (ddas)
-
-    HADOOP-8085. Add RPC metrics to ProtobufRpcEngine. (Hari Mankude via
-    suresh)
-
-    HADOOP-7621. alfredo config should be in a file not readable by users
-                 (Alejandro Abdelnur via atm)
-    HADOOP-8098. KerberosAuthenticatorHandler should use _HOST replacement to 
-    resolve principal name (tucu)
-
-    HADOOP-8118.  In metrics2.util.MBeans, change log level to trace for the
-    stack trace of InstanceAlreadyExistsException.  (szetszwo)
-
-    HADOOP-7994. Remove getProtocolVersion and getProtocolSignature from the
-    client side translator and server side implementation. (jitendra)
-
-    HADOOP-8125. make hadoop-client set of curated jars available in a
-    distribution tarball (rvs via tucu)
-
-    HADOOP-8142. Update versions from 0.23.2 to 0.23.3 in the build files.
-    (szetszwo)
-
-    HADOOP-8141. Add method to SecurityUtil to init krb5 cipher suites.
-    (todd)
-
-    HADOOP-8108. Move method getHostPortString() from NameNode to NetUtils.
-    (Brandon Li via jitendra)
-
-    HADOOP-7557 Make IPC header be extensible (sanjay radia)
-
-    HADOOP-7806. Support binding to sub-interfaces (eli)
-
-    HADOOP-6941. Adds support for building Hadoop with IBM's JDK 
-    (Stephen Watt, Eli and ddas)
-
-    HADOOP-8183. Stop using "mapred.used.genericoptions.parser" (harsh)
-
-    HADOOP-7788. Add simple HealthMonitor class to watch an HAService (todd)
-
-    HADOOP-8200. Remove HADOOP_[JOBTRACKER|TASKTRACKER]_OPTS. (eli)
-
-    HADOOP-8184.  ProtoBuf RPC engine uses the IPC layer reply packet.
-    (Sanjay Radia via szetszwo)
-
-    HADOOP-8163. Improve ActiveStandbyElector to provide hooks for
-    fencing old active. (todd)
-
-    HADOOP-8193. Refactor FailoverController/HAAdmin code to add an abstract
-    class for "target" services. (todd)
-
-    HADOOP-8212. Improve ActiveStandbyElector's behavior when session expires
-    (todd)
-
-    HADOOP-8216. Address log4j.properties inconsistencies btw main and
-    template dirs. (Patrick Hunt via eli)
-
-  OPTIMIZATIONS
-
-  BUG FIXES
-
-    HADOOP-7900. LocalDirAllocator confChanged() accesses conf.get() twice
-    (Ravi Gummadi via Uma Maheswara Rao G)
-
-    HADOOP-7635. RetryInvocationHandler should release underlying resources on
-    close. (atm)
-
-    HADOOP-7695. RPC.stopProxy can throw unintended exception while logging
-    error. (atm)
-
-    HADOOP-7833. Fix findbugs warnings in protobuf generated code.
-    (John Lee via suresh)
-
-    HADOOP-7897. ProtobufRpcEngine client side exception mechanism is not
-    consistent with WritableRpcEngine. (suresh)
-
-    HADOOP-7913. Fix bug in ProtoBufRpcEngine.  (sanjay)
-
-    HADOOP-7892. IPC logs too verbose after "RpcKind" introduction. (todd)
-
-    HADOOP-7968. Errant println left in RPC.getHighestSupportedProtocol. (Sho
-    Shimauchi via harsh)
-
-    HADOOP-7931. o.a.h.ipc.WritableRpcEngine should have a way to force
-    initialization. (atm)
-
-    HADOOP-8104. Inconsistent Jackson versions (tucu)
-
-    HADOOP-7940. The Text.clear() method does not clear the bytes as intended. (Csaba Miklos via harsh)
-
-    HADOOP-8119. Fix javac warnings in TestAuthenticationFilter in hadoop-auth.
-    (szetszwo)
-
-    HADOOP-7888. TestFailoverProxy fails intermittently on trunk. (Jason Lowe
-    via atm)
-
-    HADOOP-8154. DNS#getIPs shouldn't silently return the local host
-    IP for bogus interface names. (eli)
-
-    HADOOP-8169.  javadoc generation fails with java.lang.OutOfMemoryError:
-    Java heap space (tgraves via bobby)
-
-    HADOOP-8167. Configuration deprecation logic breaks backwards compatibility (tucu)
-
-    HADOOP-8189. LdapGroupsMapping shouldn't throw away IOException. (Jonathan Natkins via atm)
-
-    HADOOP-8191. SshFenceByTcpPort uses netcat incorrectly (todd)
-
-    HADOOP-8157. Fix race condition in Configuration that could cause spurious
-    ClassNotFoundExceptions after a GC. (todd)
-
-    HADOOP-8197. Configuration logs WARNs on every use of a deprecated key (tucu)
-
-    HADOOP-8159. NetworkTopology: getLeaf should check for invalid topologies.
-    (Colin Patrick McCabe via eli)
-
-    HADOOP-8204. TestHealthMonitor fails occasionally (todd)
-
-    HADOOP-8202. RPC stopProxy() does not close the proxy correctly.
-    (Hari Mankude via suresh)
-
-    HADOOP-8218. RPC.closeProxy shouldn't throw error when closing a mock
-    (todd)
-
-  BREAKDOWN OF HADOOP-7454 SUBTASKS
-
-    HADOOP-7455. HA: Introduce HA Service Protocol Interface. (suresh)
-    
-    HADOOP-7774. HA: Administrative CLI to control HA daemons. (todd)
-    
-    HADOOP-7896. HA: if both NNs are in Standby mode, client needs to try failing
-    back and forth several times with sleeps. (atm)
-    
-    HADOOP-7922. Improve some logging for client IPC failovers and
-    StandbyExceptions (todd)
-    
-    HADOOP-7921. StandbyException should extend IOException (todd)
-    
-    HADOOP-7928. HA: Client failover policy is incorrectly trying to fail over all
-    IOExceptions (atm)
-    
-    HADOOP-7925. Add interface and update CLI to query current state to
-    HAServiceProtocol (eli via todd)
-    
-    HADOOP-7932. Make client connection retries on socket time outs configurable.
-    (Uma Maheswara Rao G via todd)
-    
-    HADOOP-7924. FailoverController for client-based configuration (eli)
-    
-    HADOOP-7961. Move HA fencing to common. (eli)
-    
-    HADOOP-7970. HAServiceProtocol methods must throw IOException.  (Hari Mankude
-    via suresh).
-    
-    HADOOP-7992. Add ZKClient library to facilitate leader election.  (Bikas Saha
-    via suresh).
-    
-    HADOOP-7983. HA: failover should be able to pass args to fencers. (eli)
-    
-    HADOOP-7938. HA: the FailoverController should optionally fence the active
-    during failover. (eli)
-    
-    HADOOP-7991. HA: the FailoverController should check the standby is ready
-    before failing over. (eli)
-    
-    HADOOP-8038. Add 'ipc.client.connect.max.retries.on.timeouts' entry in
-    core-default.xml file. (Uma Maheswara Rao G via atm)
-    
-    HADOOP-8041. Log a warning when a failover is first attempted (todd)
-    
-    HADOOP-8068. void methods can swallow exceptions when going through failover
-    path (todd)
-    
-    HADOOP-8116. RetriableCommand is using RetryPolicy incorrectly after
-    HADOOP-7896. (atm)
-
-Release 0.23.2 - UNRELEASED 
+Release 0.23.2 - UNRELEASED

  NEW FEATURES

@@ -267,6 +28,9 @@ Release 0.23.2 - UNRELEASED

  BUG FIXES

+    HADOOP-8146.  FsShell commands cannot be interrupted
+    (Daryn Sharp via Uma Maheswara Rao G)
+
    HADOOP-7660. Maven generated .classpath doesnot includes 
    "target/generated-test-source/java" as source directory.
    (Laxman via bobby)
@@ -333,9 +97,6 @@ Release 0.23.2 - UNRELEASED
    HADOOP-8176.  Disambiguate the destination of FsShell copies (Daryn Sharp
    via bobby)

-    HADOOP-8088. User-group mapping cache incorrectly does negative caching on
-    transient failures (Khiwal Lee via bobby)
-
Release 0.23.1 - 2012-02-17 

  INCOMPATIBLE CHANGES
@@ -431,9 +192,6 @@ Release 0.23.1 - 2012-02-17

    HADOOP-7470. Move up to Jackson 1.8.8.  (Enis Soztutar via szetszwo)

-    HADOOP-7729. Send back valid HTTP response if user hits IPC port with
-    HTTP GET. (todd)
-
    HADOOP-8027. Visiting /jmx on the daemon web interfaces may print
    unnecessary error in logs. (atm)

@@ -1569,9 +1327,6 @@ Release 0.22.0 - 2011-11-29

    HADOOP-7786. Remove HDFS-specific config keys defined in FsConfig. (eli)

-    HADOOP-7358. Improve log levels when exceptions caught in RPC handler
-    (Todd Lipcon via shv)
-
  OPTIMIZATIONS

    HADOOP-6884. Add LOG.isDebugEnabled() guard for each LOG.debug(..).

+ 0 - 16
hadoop-common-project/hadoop-common/dev-support/findbugsExcludeFile.xml

@@ -270,20 +270,4 @@
      <!-- backward compatibility -->
      <Bug pattern="NM_SAME_SIMPLE_NAME_AS_SUPERCLASS"/>
    </Match>
-    <Match>
-      <!-- protobuf generated code -->
-      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.HadoopRpcProtos.*"/>
-    </Match>
-    <Match>
-      <!-- protobuf generated code -->
-      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.ProtocolInfoProtos.*"/>
-    </Match>
-    <Match>
-      <!-- protobuf generated code -->
-      <Class name="~org\.apache\.hadoop\.ipc\.protobuf\.IpcConnectionContextProtos.*"/>
-    </Match>
-    <Match>
-      <!-- protobuf generated code -->
-      <Class name="~org\.apache\.hadoop\.ha\.proto\.HAServiceProtocolProtos.*"/>
-    </Match>
 </FindBugsFilter>

+ 8 - 34
hadoop-common-project/hadoop-common/pom.xml

@@ -17,12 +17,12 @@
  <parent>
    <groupId>org.apache.hadoop</groupId>
    <artifactId>hadoop-project-dist</artifactId>
-    <version>0.23.3-SNAPSHOT</version>
+    <version>0.23.2-SNAPSHOT</version>
    <relativePath>../../hadoop-project-dist</relativePath>
  </parent>
  <groupId>org.apache.hadoop</groupId>
  <artifactId>hadoop-common</artifactId>
-  <version>0.23.3-SNAPSHOT</version>
+  <version>0.23.2-SNAPSHOT</version>
  <description>Apache Hadoop Common</description>
  <name>Apache Hadoop Common</name>
  <packaging>jar</packaging>
@@ -239,6 +239,11 @@
      <artifactId>avro</artifactId>
      <scope>compile</scope>
    </dependency>
+    <dependency>
+      <groupId>org.apache.avro</groupId>
+      <artifactId>avro-ipc</artifactId>
+      <scope>compile</scope>
+    </dependency>
    <dependency>
      <groupId>net.sf.kosmosfs</groupId>
      <artifactId>kfs</artifactId>
@@ -264,38 +269,6 @@
      <artifactId>json-simple</artifactId>
      <scope>compile</scope>
    </dependency>
-    <dependency>
-      <groupId>com.jcraft</groupId>
-      <artifactId>jsch</artifactId>
-    </dependency>
-
-    <dependency>
-      <groupId>org.apache.zookeeper</groupId>
-      <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
-      <exclusions>
-        <exclusion>
-          <!-- otherwise seems to drag in junit 3.8.1 via jline -->
-          <groupId>junit</groupId>
-          <artifactId>junit</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jdmk</groupId>
-          <artifactId>jmxtools</artifactId>
-        </exclusion>
-        <exclusion>
-          <groupId>com.sun.jmx</groupId>
-          <artifactId>jmxri</artifactId>
-        </exclusion>
-      </exclusions>
-    </dependency>
-    <dependency>
-      <groupId>org.apache.zookeeper</groupId>
-      <artifactId>zookeeper</artifactId>
-      <version>3.4.2</version>
-      <type>test-jar</type>
-      <scope>test</scope>
-    </dependency>
  </dependencies>

  <build>
@@ -309,6 +282,7 @@
            <phase>generate-test-sources</phase>
            <goals>
              <goal>schema</goal>
+              <goal>protocol</goal>
            </goals>
          </execution>
        </executions>

+ 40 - 53
hadoop-common-project/hadoop-common/src/main/conf/log4j.properties

@@ -1,26 +1,17 @@
-# Copyright 2011 The Apache Software Foundation
-# 
-# Licensed to the Apache Software Foundation (ASF) under one
-# or more contributor license agreements.  See the NOTICE file
-# distributed with this work for additional information
-# regarding copyright ownership.  The ASF licenses this file
-# to you under the Apache License, Version 2.0 (the
-# "License"); you may not use this file except in compliance
-# with the License.  You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
# Define some default values that can be overridden by system properties
hadoop.root.logger=INFO,console
hadoop.log.dir=.
hadoop.log.file=hadoop.log

+#
+# Job Summary Appender 
+#
+# Use following logger to send summary to separate file defined by 
+# hadoop.mapreduce.jobsummary.log.file rolled daily:
+# hadoop.mapreduce.jobsummary.logger=INFO,JSA
+# 
+hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
+hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log

# Define the root logger to the system property "hadoop.root.logger".
log4j.rootLogger=${hadoop.root.logger}, EventCounter
@@ -84,39 +75,16 @@ log4j.appender.TLA.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
#
#Security appender
#
-hadoop.security.logger=INFO,console
-log4j.category.SecurityLogger=${hadoop.security.logger}
hadoop.security.log.file=SecurityAuth.audit
log4j.appender.DRFAS=org.apache.log4j.DailyRollingFileAppender 
log4j.appender.DRFAS.File=${hadoop.log.dir}/${hadoop.security.log.file}
+
log4j.appender.DRFAS.layout=org.apache.log4j.PatternLayout
log4j.appender.DRFAS.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
-log4j.appender.DRFAS.DatePattern=.yyyy-MM-dd
-
-
-#
-# hdfs audit logging
-#
-hdfs.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=${hdfs.audit.logger}
-log4j.additivity.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=false
-log4j.appender.DRFAAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.DRFAAUDIT.File=${hadoop.log.dir}/hdfs-audit.log
-log4j.appender.DRFAAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.DRFAAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.DRFAAUDIT.DatePattern=.yyyy-MM-dd
-
-#
-# mapred audit logging
-#
-mapred.audit.logger=INFO,console
-log4j.logger.org.apache.hadoop.mapred.AuditLogger=${mapred.audit.logger}
-log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
-log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
-log4j.appender.MRAUDIT.File=${hadoop.log.dir}/mapred-audit.log
-log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
-log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c{2}: %m%n
-log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+#new logger
+# Define some default values that can be overridden by system properties
+hadoop.security.logger=INFO,console
+log4j.category.SecurityLogger=${hadoop.security.logger}

#
# Rolling File Appender
@@ -133,6 +101,11 @@ log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} - %m%n
#log4j.appender.RFA.layout.ConversionPattern=%d{ISO8601} %-5p %c{2} (%F:%M(%L)) - %m%n

+#
+# FSNamesystem Audit logging
+# All audit events are logged at INFO level
+#
+log4j.logger.org.apache.hadoop.hdfs.server.namenode.FSNamesystem.audit=WARN

# Custom Logging levels

@@ -150,14 +123,8 @@ log4j.logger.org.jets3t.service.impl.rest.httpclient.RestS3Service=ERROR
log4j.appender.EventCounter=org.apache.hadoop.log.metrics.EventCounter

#
-# Job Summary Appender 
+# Job Summary Appender
#
-# Use following logger to send summary to separate file defined by 
-# hadoop.mapreduce.jobsummary.log.file rolled daily:
-# hadoop.mapreduce.jobsummary.logger=INFO,JSA
-# 
-hadoop.mapreduce.jobsummary.logger=${hadoop.root.logger}
-hadoop.mapreduce.jobsummary.log.file=hadoop-mapreduce.jobsummary.log
log4j.appender.JSA=org.apache.log4j.DailyRollingFileAppender
log4j.appender.JSA.File=${hadoop.log.dir}/${hadoop.mapreduce.jobsummary.log.file}
log4j.appender.JSA.layout=org.apache.log4j.PatternLayout
@@ -166,6 +133,26 @@ log4j.appender.JSA.DatePattern=.yyyy-MM-dd
log4j.logger.org.apache.hadoop.mapred.JobInProgress$JobSummary=${hadoop.mapreduce.jobsummary.logger}
log4j.additivity.org.apache.hadoop.mapred.JobInProgress$JobSummary=false

+#
+# MapReduce Audit Log Appender
+#
+
+# Set the MapReduce audit log filename
+#hadoop.mapreduce.audit.log.file=hadoop-mapreduce.audit.log
+
+# Appender for AuditLogger.
+# Requires the following system properties to be set
+#    - hadoop.log.dir (Hadoop Log directory)
+#    - hadoop.mapreduce.audit.log.file (MapReduce audit log filename)
+
+#log4j.logger.org.apache.hadoop.mapred.AuditLogger=INFO,MRAUDIT
+#log4j.additivity.org.apache.hadoop.mapred.AuditLogger=false
+#log4j.appender.MRAUDIT=org.apache.log4j.DailyRollingFileAppender
+#log4j.appender.MRAUDIT.File=${hadoop.log.dir}/${hadoop.mapreduce.audit.log.file}
+#log4j.appender.MRAUDIT.DatePattern=.yyyy-MM-dd
+#log4j.appender.MRAUDIT.layout=org.apache.log4j.PatternLayout
+#log4j.appender.MRAUDIT.layout.ConversionPattern=%d{ISO8601} %p %c: %m%n
+
#
# Yarn ResourceManager Application Summary Log 
#

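For reference, a minimal sketch of how the SecurityLogger category wired above is consumed (the class and message here are illustrative, assuming commons-logging over log4j 1.2 on the classpath; Hadoop daemons emit security audit events under logger names prefixed with "SecurityLogger.", which is why the single log4j.category.SecurityLogger entry routes all of them):

    import org.apache.commons.logging.Log;
    import org.apache.commons.logging.LogFactory;

    public class SecurityAuditSketch {
      // The "SecurityLogger." prefix makes this logger a child of the
      // SecurityLogger category, so hadoop.security.logger controls it.
      private static final Log AUDITLOG =
          LogFactory.getLog("SecurityLogger." + SecurityAuditSketch.class.getName());

      public static void main(String[] args) {
        AUDITLOG.info("Auth successful for user hduser (illustrative message)");
      }
    }
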
+ 4 - 7
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/HttpAuthentication.xml

@@ -82,12 +82,10 @@
      <code>36000</code>.
      </p>

-      <p><code>hadoop.http.authentication.signature.secret.file</code>: The signature secret 
-      file for signing the authentication tokens. If not set a random secret is generated at 
+      <p><code>hadoop.http.authentication.signature.secret</code>: The signature secret for  
+      signing the authentication tokens. If not set a random secret is generated at 
      startup time. The same secret should be used for all nodes in the cluster, JobTracker, 
-      NameNode, DataNode and TastTracker. The default value is 
-      <code>${user.home}/hadoop-http-auth-signature-secret</code>.
-      IMPORTANT: This file should be readable only by the Unix user running the daemons.
+      NameNode, DataNode and TastTracker. The default value is a <code>hadoop</code> value.
      </p>
        
      <p><code>hadoop.http.authentication.cookie.domain</code>: The domain to use for the HTTP 
@@ -111,8 +109,7 @@
      <p><code>hadoop.http.authentication.kerberos.principal</code>: Indicates the Kerberos 
      principal to be used for HTTP endpoint when using 'kerberos' authentication.
      The principal short name must be <code>HTTP</code> per Kerberos HTTP SPENGO specification.
-      The default value is <code>HTTP/_HOST@$LOCALHOST</code>, where <code>_HOST</code> -if present-
-      is replaced with bind address of the HTTP server.
+      The default value is <code>HTTP/localhost@$LOCALHOST</code>.
      </p>

      <p><code>hadoop.http.authentication.kerberos.keytab</code>: Location of the keytab file 

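The removed text above referred to _HOST substitution in the HTTP principal. As an illustration of that substitution only, under the assumption that the local canonical hostname is what gets spliced in (a hypothetical helper, not the Hadoop API):

    import java.net.InetAddress;

    public class HostPatternSketch {
      // Hypothetical helper showing the "_HOST" expansion the removed
      // paragraph describes; not the actual Hadoop utility.
      static String replaceHostPattern(String principal) throws Exception {
        String fqdn = InetAddress.getLocalHost().getCanonicalHostName();
        return principal.replace("_HOST", fqdn.toLowerCase());
      }

      public static void main(String[] args) throws Exception {
        // e.g. HTTP/_HOST@EXAMPLE.COM -> HTTP/node1.example.com@EXAMPLE.COM
        System.out.println(replaceHostPattern("HTTP/_HOST@EXAMPLE.COM"));
      }
    }
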
+ 2 - 0
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/cluster_setup.xml

@@ -134,6 +134,8 @@
          <tr><td>DataNode</td><td>HADOOP_DATANODE_OPTS</td></tr>
          <tr><td>SecondaryNamenode</td>
              <td>HADOOP_SECONDARYNAMENODE_OPTS</td></tr>
+          <tr><td>JobTracker</td><td>HADOOP_JOBTRACKER_OPTS</td></tr>
+          <tr><td>TaskTracker</td><td>HADOOP_TASKTRACKER_OPTS</td></tr>
          </table>
          
          <p> For example, To configure Namenode to use parallelGC, the

+ 0 - 6
hadoop-common-project/hadoop-common/src/main/docs/src/documentation/content/xdocs/service_level_auth.xml

@@ -138,12 +138,6 @@
            dfsadmin and mradmin commands to refresh the security policy in-effect.
            </td>
          </tr>
-          <tr>
-            <td><code>security.ha.service.protocol.acl</code></td>
-            <td>ACL for HAService protocol used by HAAdmin to manage the
-            active and stand-by states of namenode.
-            </td>
-          </tr>
        </table>
      </section>
      

+ 35 - 65
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/conf/Configuration.java

@@ -189,12 +189,6 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
  private static final Map<ClassLoader, Map<String, Class<?>>>
    CACHE_CLASSES = new WeakHashMap<ClassLoader, Map<String, Class<?>>>();

-  /**
-   * Sentinel value to store negative cache results in {@link #CACHE_CLASSES}.
-   */
-  private static final Class<?> NEGATIVE_CACHE_SENTINEL =
-    NegativeCacheSentinel.class;
-
  /**
   * Stores the mapping of key to the resource which modifies or loads 
   * the key most recently
@@ -309,29 +303,10 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
   * @return <code>true</code> if the key is deprecated and 
   *         <code>false</code> otherwise.
   */
-  public static boolean isDeprecated(String key) {
+  private static boolean isDeprecated(String key) {
    return deprecatedKeyMap.containsKey(key);
  }
-
-  /**
-   * Returns the alternate name for a key if the property name is deprecated
-   * or if deprecates a property name.
-   *
-   * @param name property name.
-   * @return alternate name.
-   */
-  private String getAlternateName(String name) {
-    String altName;
-    DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
-    if (keyInfo != null) {
-      altName = (keyInfo.newKeys.length > 0) ? keyInfo.newKeys[0] : null;
-    }
-    else {
-      altName = reverseDeprecatedKeyMap.get(name);
-    }
-    return altName;
-  }
-
+ 
  /**
   * Checks for the presence of the property <code>name</code> in the
   * deprecation map. Returns the first of the list of new keys if present
@@ -347,7 +322,9 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
  private String handleDeprecation(String name) {
    if (isDeprecated(name)) {
      DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
-      warnOnceIfDeprecated(name);
+      if (!keyInfo.accessed) {
+        LOG.warn(keyInfo.getWarningMessage(name));
+      }
      for (String newKey : keyInfo.newKeys) {
        if(newKey != null) {
          name = newKey;
@@ -360,6 +337,11 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
        getOverlay().containsKey(deprecatedKey)) {
      getProps().setProperty(name, getOverlay().getProperty(deprecatedKey));
      getOverlay().setProperty(name, getOverlay().getProperty(deprecatedKey));
+      
+      DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(deprecatedKey);
+      if (!keyInfo.accessed) {
+        LOG.warn(keyInfo.getWarningMessage(deprecatedKey));
+      }
    }
    return name;
  }
@@ -637,8 +619,8 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,

  /** 
   * Set the <code>value</code> of the <code>name</code> property. If 
-   * <code>name</code> is deprecated or there is a deprecated name associated to it,
-   * it sets the value to both names.
+   * <code>name</code> is deprecated, it sets the <code>value</code> to the keys
+   * that replace the deprecated key.
   * 
   * @param name property name.
   * @param value property value.
@@ -647,35 +629,29 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    if (deprecatedKeyMap.isEmpty()) {
      getProps();
    }
-    getOverlay().setProperty(name, value);
-    getProps().setProperty(name, value);
-    updatingResource.put(name, UNKNOWN_RESOURCE);
-    String altName = getAlternateName(name);
-    if (altName != null) {
-      getOverlay().setProperty(altName, value);
-      getProps().setProperty(altName, value);
+    if (!isDeprecated(name)) {
+      getOverlay().setProperty(name, value);
+      getProps().setProperty(name, value);
+      updatingResource.put(name, UNKNOWN_RESOURCE);
    }
-    warnOnceIfDeprecated(name);
-  }
-
-  private void warnOnceIfDeprecated(String name) {
-    DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
-    if (keyInfo != null && !keyInfo.accessed) {
+    else {
+      DeprecatedKeyInfo keyInfo = deprecatedKeyMap.get(name);
      LOG.warn(keyInfo.getWarningMessage(name));
+      for (String newKey : keyInfo.newKeys) {
+        getOverlay().setProperty(newKey, value);
+        getProps().setProperty(newKey, value);
+      }
    }
  }
-
+  
  /**
   * Unset a previously set property.
   */
  public synchronized void unset(String name) {
-    String altName = getAlternateName(name);
+    name = handleDeprecation(name);
+
    getOverlay().remove(name);
    getProps().remove(name);
-    if (altName !=null) {
-      getOverlay().remove(altName);
-       getProps().remove(altName);
-    }
  }

  /**
@@ -1197,24 +1173,24 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
      }
    }

-    Class<?> clazz = map.get(name);
-    if (clazz == null) {
+    Class<?> clazz = null;
+    if (!map.containsKey(name)) {
      try {
        clazz = Class.forName(name, true, classLoader);
      } catch (ClassNotFoundException e) {
-        // Leave a marker that the class isn't found
-        map.put(name, NEGATIVE_CACHE_SENTINEL);
+        map.put(name, null); //cache negative that class is not found
        return null;
      }
      // two putters can race here, but they'll put the same class
      map.put(name, clazz);
-      return clazz;
-    } else if (clazz == NEGATIVE_CACHE_SENTINEL) {
-      return null; // not found
-    } else {
-      // cache hit
-      return clazz;
+    } else { // check already performed on this class name
+      clazz = map.get(name);
+      if (clazz == null) { // found the negative
+        return null;
+      }
    }
+
+    return clazz;
  }

  /** 
@@ -1916,10 +1892,4 @@ public class Configuration implements Iterable<Map.Entry<String,String>>,
    Configuration.addDeprecation("fs.default.name", 
               new String[]{CommonConfigurationKeys.FS_DEFAULT_NAME_KEY});
  }
-  
-  /**
-   * A unique class which is used as a sentinel value in the caching
-   * for getClassByName. {@see Configuration#getClassByNameOrNull(String)}
-   */
-  private static abstract class NegativeCacheSentinel {}
}

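For context on the getClassByNameOrNull hunk above: the removed 0.23.3 code (HADOOP-8157) cached a private sentinel class for known-missing names, so a single map read distinguishes cache hit, negative hit, and not-yet-looked-up; the restored code caches null and has to pair containsKey with get. A self-contained sketch of the sentinel style, with illustrative names and a ConcurrentHashMap in place of Hadoop's per-ClassLoader WeakHashMap:

    import java.util.Map;
    import java.util.concurrent.ConcurrentHashMap;

    public class ClassCacheSketch {
      // A unique private class used only as a marker value.
      private static abstract class NegativeSentinel {}
      private static final Class<?> NEGATIVE_SENTINEL = NegativeSentinel.class;

      private final Map<String, Class<?>> cache =
          new ConcurrentHashMap<String, Class<?>>();

      Class<?> getClassByNameOrNull(String name) {
        Class<?> clazz = cache.get(name);     // one read covers all three cases
        if (clazz == NEGATIVE_SENTINEL) {
          return null;                        // known to be missing
        }
        if (clazz != null) {
          return clazz;                       // cache hit
        }
        try {
          clazz = Class.forName(name);
        } catch (ClassNotFoundException e) {
          cache.put(name, NEGATIVE_SENTINEL); // remember the miss
          return null;
        }
        cache.put(name, clazz);               // racing puts store the same class
        return clazz;
      }
    }
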
+ 1 - 26
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeys.java

@@ -114,36 +114,11 @@ public class CommonConfigurationKeys extends CommonConfigurationKeysPublic {
  public static final String 
  HADOOP_SECURITY_SERVICE_AUTHORIZATION_REFRESH_USER_MAPPINGS =
      "security.refresh.user.mappings.protocol.acl";
-  public static final String 
-  SECURITY_HA_SERVICE_PROTOCOL_ACL = "security.ha.service.protocol.acl";
  
  public static final String HADOOP_SECURITY_TOKEN_SERVICE_USE_IP =
      "hadoop.security.token.service.use_ip";
  public static final boolean HADOOP_SECURITY_TOKEN_SERVICE_USE_IP_DEFAULT =
      true;
-  
-  /**
-   * HA health monitor and failover controller.
-   */
- 
-  /** How often to retry connecting to the service. */
-  public static final String HA_HM_CONNECT_RETRY_INTERVAL_KEY =
-    "ha.health-monitor.connect-retry-interval.ms";
-  public static final long HA_HM_CONNECT_RETRY_INTERVAL_DEFAULT = 1000;
- 
-  /* How often to check the service. */
-  public static final String HA_HM_CHECK_INTERVAL_KEY =
-    "ha.health-monitor.check-interval.ms";  
-  public static final long HA_HM_CHECK_INTERVAL_DEFAULT = 1000;
- 
-  /* How long to sleep after an unexpected RPC error. */
-  public static final String HA_HM_SLEEP_AFTER_DISCONNECT_KEY =
-    "ha.health-monitor.sleep-after-disconnect.ms";
-  public static final long HA_HM_SLEEP_AFTER_DISCONNECT_DEFAULT = 1000;
- 
-  /* Timeout for the actual monitorHealth() calls. */
-  public static final String HA_HM_RPC_TIMEOUT_KEY =
-    "ha.health-monitor.rpc-timeout.ms";
-  public static final int HA_HM_RPC_TIMEOUT_DEFAULT = 45000;
+
}


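For orientation, a small sketch of how callers were expected to read the health-monitor keys removed above (standard Configuration getters; the key strings and default values come from the deleted constants):

    import org.apache.hadoop.conf.Configuration;

    public class HaMonitorConfSketch {
      public static void main(String[] args) {
        Configuration conf = new Configuration();
        // Key names and defaults are the ones deleted in the hunk above.
        long checkIntervalMs =
            conf.getLong("ha.health-monitor.check-interval.ms", 1000);
        int rpcTimeoutMs =
            conf.getInt("ha.health-monitor.rpc-timeout.ms", 45000);
        System.out.println(checkIntervalMs + "ms checks, "
            + rpcTimeoutMs + "ms RPC timeout");
      }
    }
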
+ 0 - 9
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/CommonConfigurationKeysPublic.java

@@ -63,10 +63,6 @@ public class CommonConfigurationKeysPublic {
  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
  public static final String  NET_TOPOLOGY_NODE_SWITCH_MAPPING_IMPL_KEY =
    "net.topology.node.switch.mapping.impl";
-  
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
-  public static final String  NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY =
-    "net.topology.table.file.name";

  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
  public static final String  FS_TRASH_CHECKPOINT_INTERVAL_KEY =
@@ -176,11 +172,6 @@ public class CommonConfigurationKeysPublic {
  /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_KEY */
  public static final int     IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT = 10;
  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
-  public static final String  IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY =
-    "ipc.client.connect.max.retries.on.timeouts";
-  /** Default value for IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY */
-  public static final int  IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT = 45;
-  /** See <a href="{@docRoot}/../core-default.html">core-default.xml</a> */
  public static final String  IPC_CLIENT_TCPNODELAY_KEY =
    "ipc.client.tcpnodelay";
  /** Defalt value for IPC_CLIENT_TCPNODELAY_KEY */

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/LocalDirAllocator.java

@@ -253,7 +253,7 @@ public class LocalDirAllocator {
        throws IOException {
      String newLocalDirs = conf.get(contextCfgItemName);
      if (!newLocalDirs.equals(savedLocalDirs)) {
-        localDirs = StringUtils.getTrimmedStrings(newLocalDirs);
+        localDirs = conf.getTrimmedStrings(contextCfgItemName);
        localFS = FileSystem.getLocal(conf);
        int numDirs = localDirs.length;
        ArrayList<String> dirs = new ArrayList<String>(numDirs);

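The LocalDirAllocator hunk reverts HADOOP-7900, which made confChanged() parse the same string it had just compared instead of re-reading the key from the Configuration. The fixed pattern as a standalone sketch (the class here is illustrative, not the Hadoop code; StringUtils.getTrimmedStrings is the same utility the removed line uses):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.util.StringUtils;

    public class ConfChangedSketch {
      private String savedLocalDirs = "";

      synchronized void confChanged(Configuration conf, String key) {
        String newLocalDirs = conf.get(key, "");   // read the key exactly once
        if (!newLocalDirs.equals(savedLocalDirs)) {
          // Parse the snapshot that was compared; a second conf.get() here
          // could observe a different value if another thread reconfigures.
          String[] dirs = StringUtils.getTrimmedStrings(newLocalDirs);
          savedLocalDirs = newLocalDirs;
          System.out.println(dirs.length + " local dirs configured");
        }
      }
    }
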
+ 11 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/shell/Command.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.fs.shell;

import java.io.FileNotFoundException;
import java.io.IOException;
+import java.io.InterruptedIOException;
import java.io.PrintStream;
import java.lang.reflect.Field;
import java.util.ArrayList;
@@ -152,6 +153,9 @@ abstract public class Command extends Configured {
      }
      processOptions(args);
      processRawArguments(args);
+    } catch (CommandInterruptException e) {
+      displayError("Interrupted");
+      return 130;
    } catch (IOException e) {
      displayError(e);
    }
@@ -349,6 +353,10 @@ abstract public class Command extends Configured {
  public void displayError(Exception e) {
    // build up a list of exceptions that occurred
    exceptions.add(e);
+    // use runtime so it rips up through the stack and exits out 
+    if (e instanceof InterruptedIOException) {
+      throw new CommandInterruptException();
+    }
    
    String errorMessage = e.getLocalizedMessage();
    if (errorMessage == null) {
@@ -454,4 +462,7 @@ abstract public class Command extends Configured {
    }
    return value;
  }
+  
+  @SuppressWarnings("serial")
+  static class CommandInterruptException extends RuntimeException {}
}

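The Command.java hunks above implement HADOOP-8146: displayError() rethrows an InterruptedIOException as the unchecked CommandInterruptException so it unwinds every nested processing loop in one throw, and run() maps it to exit status 130, the conventional SIGINT code. The same pattern in a self-contained sketch (class and method names here are illustrative):

    import java.io.IOException;
    import java.io.InterruptedIOException;

    public class InterruptUnwindSketch {
      // Unchecked so it escapes every nested processing loop in one throw.
      static class InterruptEscape extends RuntimeException {}

      static void processOne(int i) throws IOException {
        if (Thread.currentThread().isInterrupted()) {
          throw new InterruptedIOException("interrupted at item " + i);
        }
        // ... per-item work would go here ...
      }

      public static void main(String[] args) {
        int exitCode = 0;
        try {
          for (int i = 0; i < 1000; i++) {
            try {
              processOne(i);
            } catch (InterruptedIOException e) {
              throw new InterruptEscape();  // rip up through the stack
            } catch (IOException e) {
              exitCode = 1;                 // ordinary errors: note and continue
            }
          }
        } catch (InterruptEscape e) {
          exitCode = 130;                   // conventional SIGINT exit status
        }
        System.exit(exitCode);
      }
    }
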
+ 0 - 917
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ActiveStandbyElector.java

@@ -1,917 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.util.Arrays;
-import java.util.List;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.util.StringUtils;
-import org.apache.zookeeper.data.ACL;
-import org.apache.zookeeper.KeeperException;
-import org.apache.zookeeper.Watcher;
-import org.apache.zookeeper.WatchedEvent;
-import org.apache.zookeeper.Watcher.Event;
-import org.apache.zookeeper.ZKUtil;
-import org.apache.zookeeper.ZooKeeper;
-import org.apache.zookeeper.CreateMode;
-import org.apache.zookeeper.AsyncCallback.*;
-import org.apache.zookeeper.data.Stat;
-import org.apache.zookeeper.KeeperException.Code;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-/**
- * 
- * This class implements a simple library to perform leader election on top of
- * Apache Zookeeper. Using Zookeeper as a coordination service, leader election
- * can be performed by atomically creating an ephemeral lock file (znode) on
- * Zookeeper. The service instance that successfully creates the znode becomes
- * active and the rest become standbys. <br/>
- * This election mechanism is only efficient for small number of election
- * candidates (order of 10's) because contention on single znode by a large
- * number of candidates can result in Zookeeper overload. <br/>
- * The elector does not guarantee fencing (protection of shared resources) among
- * service instances. After it has notified an instance about becoming a leader,
- * then that instance must ensure that it meets the service consistency
- * requirements. If it cannot do so, then it is recommended to quit the
- * election. The application implements the {@link ActiveStandbyElectorCallback}
- * to interact with the elector
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class ActiveStandbyElector implements StatCallback, StringCallback {
-
-  /**
-   * Callback interface to interact with the ActiveStandbyElector object. <br/>
-   * The application will be notified with a callback only on state changes
-   * (i.e. there will never be successive calls to becomeActive without an
-   * intermediate call to enterNeutralMode). <br/>
-   * The callbacks will be running on Zookeeper client library threads. The
-   * application should return from these callbacks quickly so as not to impede
-   * Zookeeper client library performance and notifications. The app will
-   * typically remember the state change and return from the callback. It will
-   * then proceed with implementing actions around that state change. It is
-   * possible to be called back again while these actions are in flight and the
-   * app should handle this scenario.
-   */
-  public interface ActiveStandbyElectorCallback {
-    /**
-     * This method is called when the app becomes the active leader
-     */
-    void becomeActive();
-
-    /**
-     * This method is called when the app becomes a standby
-     */
-    void becomeStandby();
-
-    /**
-     * If the elector gets disconnected from Zookeeper and does not know about
-     * the lock state, then it will notify the service via the enterNeutralMode
-     * interface. The service may choose to ignore this or stop doing state
-     * changing operations. Upon reconnection, the elector verifies the leader
-     * status and calls back on the becomeActive and becomeStandby app
-     * interfaces. <br/>
-     * Zookeeper disconnects can happen due to network issues or loss of
-     * Zookeeper quorum. Thus enterNeutralMode can be used to guard against
-     * split-brain issues. In such situations it might be prudent to call
-     * becomeStandby too. However, such state change operations might be
-     * expensive and enterNeutralMode can help guard against doing that for
-     * transient issues.
-     */
-    void enterNeutralMode();
-
-    /**
-     * If there is any fatal error (e.g. wrong ACL's, unexpected Zookeeper
-     * errors or Zookeeper persistent unavailability) then notifyFatalError is
-     * called to notify the app about it.
-     */
-    void notifyFatalError(String errorMessage);
-
-    /**
-     * If an old active has failed, rather than exited gracefully, then
-     * the new active may need to take some fencing actions against it
-     * before proceeding with failover.
-     * 
-     * @param oldActiveData the application data provided by the prior active
-     */
-    void fenceOldActive(byte[] oldActiveData);
-  }
-
-  /**
-   * Name of the lock znode used by the library. Protected for access in test
-   * classes
-   */
-  @VisibleForTesting
-  protected static final String LOCK_FILENAME = "ActiveStandbyElectorLock";
-  @VisibleForTesting
-  protected static final String BREADCRUMB_FILENAME = "ActiveBreadCrumb";
-
-  public static final Log LOG = LogFactory.getLog(ActiveStandbyElector.class);
-
-  private static final int NUM_RETRIES = 3;
-
-  private static enum ConnectionState {
-    DISCONNECTED, CONNECTED, TERMINATED
-  };
-
-  static enum State {
-    INIT, ACTIVE, STANDBY, NEUTRAL
-  };
-
-  private State state = State.INIT;
-  private int createRetryCount = 0;
-  private int statRetryCount = 0;
-  private ZooKeeper zkClient;
-  private ConnectionState zkConnectionState = ConnectionState.TERMINATED;
-
-  private final ActiveStandbyElectorCallback appClient;
-  private final String zkHostPort;
-  private final int zkSessionTimeout;
-  private final List<ACL> zkAcl;
-  private byte[] appData;
-  private final String zkLockFilePath;
-  private final String zkBreadCrumbPath;
-  private final String znodeWorkingDir;
-
-  private Lock sessionReestablishLockForTests = new ReentrantLock();
-  private boolean wantToBeInElection;
-  
-  /**
-   * Create a new ActiveStandbyElector object <br/>
-   * The elector is created by providing to it the Zookeeper configuration, the
-   * parent znode under which to create the znode and a reference to the
-   * callback interface. <br/>
-   * The parent znode name must be the same for all service instances and
-   * different across services. <br/>
-   * After the leader has been lost, a new leader will be elected after the
-   * session timeout expires. Hence, the app must set this parameter based on
-   * its needs for failure response time. The session timeout must be greater
-   * than the Zookeeper disconnect timeout and is recommended to be 3X that
-   * value to enable Zookeeper to retry transient disconnections. Setting a very
-   * short session timeout may result in frequent transitions between active and
-   * standby states during issues like network outages/GS pauses.
-   * 
-   * @param zookeeperHostPorts
-   *          ZooKeeper hostPort for all ZooKeeper servers
-   * @param zookeeperSessionTimeout
-   *          ZooKeeper session timeout
-   * @param parentZnodeName
-   *          znode under which to create the lock
-   * @param acl
-   *          ZooKeeper ACL's
-   * @param app
-   *          reference to callback interface object
-   * @throws IOException
-   * @throws HadoopIllegalArgumentException
-   */
-  public ActiveStandbyElector(String zookeeperHostPorts,
-      int zookeeperSessionTimeout, String parentZnodeName, List<ACL> acl,
-      ActiveStandbyElectorCallback app) throws IOException,
-      HadoopIllegalArgumentException {
-    if (app == null || acl == null || parentZnodeName == null
-        || zookeeperHostPorts == null || zookeeperSessionTimeout <= 0) {
-      throw new HadoopIllegalArgumentException("Invalid argument");
-    }
-    zkHostPort = zookeeperHostPorts;
-    zkSessionTimeout = zookeeperSessionTimeout;
-    zkAcl = acl;
-    appClient = app;
-    znodeWorkingDir = parentZnodeName;
-    zkLockFilePath = znodeWorkingDir + "/" + LOCK_FILENAME;
-    zkBreadCrumbPath = znodeWorkingDir + "/" + BREADCRUMB_FILENAME;    
-
-    // createConnection for future API calls
-    createConnection();
-  }
-
-  /**
-   * To participate in election, the app will call joinElection. The result will
-   * be notified by a callback on either the becomeActive or becomeStandby app
-   * interfaces. <br/>
-   * After this the elector will automatically monitor the leader status and
-   * perform re-election if necessary<br/>
-   * The app could potentially start off in standby mode and ignore the
-   * becomeStandby call.
-   * 
-   * @param data
-   *          to be set by the app. non-null data must be set.
-   * @throws HadoopIllegalArgumentException
-   *           if valid data is not supplied
-   */
-  public synchronized void joinElection(byte[] data)
-      throws HadoopIllegalArgumentException {
-    
-    LOG.debug("Attempting active election");
-
-    if (data == null) {
-      throw new HadoopIllegalArgumentException("data cannot be null");
-    }
-
-    appData = new byte[data.length];
-    System.arraycopy(data, 0, appData, 0, data.length);
-
-    joinElectionInternal();
-  }
-  
-  /**
-   * @return true if the configured parent znode exists
-   */
-  public synchronized boolean parentZNodeExists()
-      throws IOException, InterruptedException {
-    Preconditions.checkState(zkClient != null);
-    try {
-      return zkClient.exists(znodeWorkingDir, false) != null;
-    } catch (KeeperException e) {
-      throw new IOException("Couldn't determine existence of znode '" +
-          znodeWorkingDir + "'", e);
-    }
-  }
-
-  /**
-   * Utility function to ensure that the configured base znode exists.
-   * This recursively creates the znode as well as all of its parents.
-   */
-  public synchronized void ensureParentZNode()
-      throws IOException, InterruptedException {
-    String pathParts[] = znodeWorkingDir.split("/");
-    Preconditions.checkArgument(pathParts.length >= 1 &&
-        "".equals(pathParts[0]),
-        "Invalid path: %s", znodeWorkingDir);
-    
-    StringBuilder sb = new StringBuilder();
-    for (int i = 1; i < pathParts.length; i++) {
-      sb.append("/").append(pathParts[i]);
-      String prefixPath = sb.toString();
-      LOG.debug("Ensuring existence of " + prefixPath);
-      try {
-        createWithRetries(prefixPath, new byte[]{}, zkAcl, CreateMode.PERSISTENT);
-      } catch (KeeperException e) {
-        if (isNodeExists(e.code())) {
-          // This is OK - just ensuring existence.
-          continue;
-        } else {
-          throw new IOException("Couldn't create " + prefixPath, e);
-        }
-      }
-    }
-    
-    LOG.info("Successfully created " + znodeWorkingDir + " in ZK.");
-  }
-  
-  /**
-   * Clear all of the state held within the parent ZNode.
-   * This recursively deletes everything within the znode as well as the
-   * parent znode itself. It should only be used when it's certain that
-   * no electors are currently participating in the election.
-   */
-  public synchronized void clearParentZNode()
-      throws IOException, InterruptedException {
-    try {
-      LOG.info("Recursively deleting " + znodeWorkingDir + " from ZK...");
-
-      zkDoWithRetries(new ZKAction<Void>() {
-        @Override
-        public Void run() throws KeeperException, InterruptedException {
-          ZKUtil.deleteRecursive(zkClient, znodeWorkingDir);
-          return null;
-        }
-      });
-    } catch (KeeperException e) {
-      throw new IOException("Couldn't clear parent znode " + znodeWorkingDir,
-          e);
-    }
-    LOG.info("Successfully deleted " + znodeWorkingDir + " from ZK.");
-  }
-
-
-  /**
-   * Any service instance can drop out of the election by calling quitElection. 
-   * <br/>
-   * This will lose any leader status, if held, and stop monitoring of the lock
-   * node. <br/>
-   * If the instance wants to participate in election again, then it needs to
-   * call joinElection(). <br/>
-   * This allows service instances to take themselves out of rotation for known
-   * impending unavailable states (e.g. long GC pause or software upgrade).
-   * 
-   * @param needFence true if the underlying daemon may need to be fenced
-   * if a failover occurs due to dropping out of the election.
-   */
-  public synchronized void quitElection(boolean needFence) {
-    LOG.info("Yielding from election");
-    if (!needFence && state == State.ACTIVE) {
-      // If active is gracefully going back to standby mode, remove
-      // our permanent znode so no one fences us.
-      tryDeleteOwnBreadCrumbNode();
-    }
-    reset();
-    wantToBeInElection = false;
-  }
-
-  /**
-   * Exception thrown when there is no active leader
-   */
-  public static class ActiveNotFoundException extends Exception {
-    private static final long serialVersionUID = 3505396722342846462L;
-  }
-
-  /**
-   * get data set by the active leader
-   * 
-   * @return data set by the active instance
-   * @throws ActiveNotFoundException
-   *           when there is no active leader
-   * @throws KeeperException
-   *           other zookeeper operation errors
-   * @throws InterruptedException
-   * @throws IOException
-   *           when ZooKeeper connection could not be established
-   */
-  public synchronized byte[] getActiveData() throws ActiveNotFoundException,
-      KeeperException, InterruptedException, IOException {
-    try {
-      if (zkClient == null) {
-        createConnection();
-      }
-      Stat stat = new Stat();
-      return zkClient.getData(zkLockFilePath, false, stat);
-    } catch(KeeperException e) {
-      Code code = e.code();
-      if (isNodeDoesNotExist(code)) {
-        // handle the commonly expected cases that make sense for us
-        throw new ActiveNotFoundException();
-      } else {
-        throw e;
-      }
-    }
-  }
-
-  /**
-   * interface implementation of Zookeeper callback for create
-   */
-  @Override
-  public synchronized void processResult(int rc, String path, Object ctx,
-      String name) {
-    if (isStaleClient(ctx)) return;
-    LOG.debug("CreateNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState);
-
-    Code code = Code.get(rc);
-    if (isSuccess(code)) {
-      // we successfully created the znode. we are the leader. start monitoring
-      becomeActive();
-      monitorActiveStatus();
-      return;
-    }
-
-    if (isNodeExists(code)) {
-      if (createRetryCount == 0) {
-        // znode exists and we did not retry the operation. so a different
-        // instance has created it. become standby and monitor lock.
-        becomeStandby();
-      }
-      // if we had retried then the znode could have been created by our first
-      // attempt to the server (that we lost) and this node exists response is
-      // for the second attempt. verify this case via ephemeral node owner. this
-      // will happen on the callback for monitoring the lock.
-      monitorActiveStatus();
-      return;
-    }
-
-    String errorMessage = "Received create error from Zookeeper. code:"
-        + code.toString() + " for path " + path;
-    LOG.debug(errorMessage);
-
-    if (shouldRetry(code)) {
-      if (createRetryCount < NUM_RETRIES) {
-        LOG.debug("Retrying createNode createRetryCount: " + createRetryCount);
-        ++createRetryCount;
-        createLockNodeAsync();
-        return;
-      }
-      errorMessage = errorMessage
-          + ". Not retrying further znode create connection errors.";
-    } else if (isSessionExpired(code)) {
-      // This isn't fatal - the client Watcher will re-join the election
-      LOG.warn("Lock acquisition failed because session was lost");
-      return;
-    }
-
-    fatalError(errorMessage);
-  }
-
-  /**
-   * interface implementation of Zookeeper callback for monitor (exists)
-   */
-  @Override
-  public synchronized void processResult(int rc, String path, Object ctx,
-      Stat stat) {
-    if (isStaleClient(ctx)) return;
-    LOG.debug("StatNode result: " + rc + " for path: " + path
-        + " connectionState: " + zkConnectionState);
-
-    Code code = Code.get(rc);
-    if (isSuccess(code)) {
-      // the following owner check completes verification in case the lock znode
-      // creation was retried
-      if (stat.getEphemeralOwner() == zkClient.getSessionId()) {
-        // we own the lock znode. so we are the leader
-        becomeActive();
-      } else {
-        // we dont own the lock znode. so we are a standby.
-        becomeStandby();
-      }
-      // the watch set by us will notify about changes
-      return;
-    }
-
-    if (isNodeDoesNotExist(code)) {
-      // the lock znode disappeared before we started monitoring it
-      enterNeutralMode();
-      joinElectionInternal();
-      return;
-    }
-
-    String errorMessage = "Received stat error from Zookeeper. code:"
-        + code.toString();
-    LOG.debug(errorMessage);
-
-    if (shouldRetry(code)) {
-      if (statRetryCount < NUM_RETRIES) {
-        ++statRetryCount;
-        monitorLockNodeAsync();
-        return;
-      }
-      errorMessage = errorMessage
-          + ". Not retrying further znode monitoring connection errors.";
-    }
-
-    fatalError(errorMessage);
-  }
-
-  /**
-   * interface implementation of Zookeeper watch events (connection and node)
-   */
-  synchronized void processWatchEvent(ZooKeeper zk, WatchedEvent event) {
-    Event.EventType eventType = event.getType();
-    if (isStaleClient(zk)) return;
-    LOG.debug("Watcher event type: " + eventType + " with state:"
-        + event.getState() + " for path:" + event.getPath()
-        + " connectionState: " + zkConnectionState);
-
-    if (eventType == Event.EventType.None) {
-      // the connection state has changed
-      switch (event.getState()) {
-      case SyncConnected:
-        LOG.info("Session connected.");
-        // if the listener was asked to move to safe state then it needs to
-        // be undone
-        ConnectionState prevConnectionState = zkConnectionState;
-        zkConnectionState = ConnectionState.CONNECTED;
-        if (prevConnectionState == ConnectionState.DISCONNECTED) {
-          monitorActiveStatus();
-        }
-        break;
-      case Disconnected:
-        LOG.info("Session disconnected. Entering neutral mode...");
-
-        // ask the app to move to safe state because zookeeper connection
-        // is not active and we dont know our state
-        zkConnectionState = ConnectionState.DISCONNECTED;
-        enterNeutralMode();
-        break;
-      case Expired:
-        // the connection got terminated because of session timeout
-        // call listener to reconnect
-        LOG.info("Session expired. Entering neutral mode and rejoining...");
-        enterNeutralMode();
-        reJoinElection();
-        break;
-      default:
-        fatalError("Unexpected Zookeeper watch event state: "
-            + event.getState());
-        break;
-      }
-
-      return;
-    }
-
-    // A watch on the lock path in ZooKeeper has fired, so something has
-    // changed on the lock. Ideally we should check that the path is the same
-    // as the lock path, but we trust ZooKeeper for now
-    String path = event.getPath();
-    if (path != null) {
-      switch (eventType) {
-      case NodeDeleted:
-        if (state == State.ACTIVE) {
-          enterNeutralMode();
-        }
-        joinElectionInternal();
-        break;
-      case NodeDataChanged:
-        monitorActiveStatus();
-        break;
-      default:
-        LOG.debug("Unexpected node event: " + eventType + " for path: " + path);
-        monitorActiveStatus();
-      }
-
-      return;
-    }
-
-    // some unexpected error has occurred
-    fatalError("Unexpected watch error from Zookeeper");
-  }
-
-  /**
-   * Get a new ZooKeeper client instance. Protected so that a test class
-   * can subclass it and pass in a mock ZooKeeper object.
-   * 
-   * @return new zookeeper client instance
-   * @throws IOException
-   */
-  protected synchronized ZooKeeper getNewZooKeeper() throws IOException {
-    ZooKeeper zk = new ZooKeeper(zkHostPort, zkSessionTimeout, null);
-    zk.register(new WatcherWithClientRef(zk));
-    return zk;
-  }
-
-  private void fatalError(String errorMessage) {
-    reset();
-    appClient.notifyFatalError(errorMessage);
-  }
-
-  private void monitorActiveStatus() {
-    LOG.debug("Monitoring active leader");
-    statRetryCount = 0;
-    monitorLockNodeAsync();
-  }
-
-  private void joinElectionInternal() {
-    if (zkClient == null) {
-      if (!reEstablishSession()) {
-        fatalError("Failed to reEstablish connection with ZooKeeper");
-        return;
-      }
-    }
-
-    createRetryCount = 0;
-    wantToBeInElection = true;
-    createLockNodeAsync();
-  }
-
-  private void reJoinElection() {
-    LOG.info("Trying to re-establish ZK session");
-    
-    // Some of the test cases rely on expiring the ZK sessions and
-    // ensuring that the other node takes over. But, there's a race
-    // where the original lease holder could reconnect faster than the other
-    // thread manages to take the lock itself. This lock allows the
-    // tests to block the reconnection. It's a shame that this leaked
-    // into non-test code, but the lock is only acquired here so will never
-    // be contended.
-    sessionReestablishLockForTests.lock();
-    try {
-      terminateConnection();
-      joinElectionInternal();
-    } finally {
-      sessionReestablishLockForTests.unlock();
-    }
-  }
-  
-  @VisibleForTesting
-  void preventSessionReestablishmentForTests() {
-    sessionReestablishLockForTests.lock();
-  }
-  
-  @VisibleForTesting
-  void allowSessionReestablishmentForTests() {
-    sessionReestablishLockForTests.unlock();
-  }
-  
-  @VisibleForTesting
-  long getZKSessionIdForTests() {
-    return zkClient.getSessionId();
-  }
-  
-  @VisibleForTesting
-  synchronized State getStateForTests() {
-    return state;
-  }
-
-  private boolean reEstablishSession() {
-    int connectionRetryCount = 0;
-    boolean success = false;
-    while(!success && connectionRetryCount < NUM_RETRIES) {
-      LOG.debug("Establishing zookeeper connection");
-      try {
-        createConnection();
-        success = true;
-      } catch(IOException e) {
-        LOG.warn(e);
-        try {
-          Thread.sleep(5000);
-        } catch(InterruptedException e1) {
-          LOG.warn(e1);
-        }
-      }
-      ++connectionRetryCount;
-    }
-    return success;
-  }
-
-  private void createConnection() throws IOException {
-    zkClient = getNewZooKeeper();
-  }
-  
-  private void terminateConnection() {
-    if (zkClient == null) {
-      return;
-    }
-    LOG.debug("Terminating ZK connection");
-    ZooKeeper tempZk = zkClient;
-    zkClient = null;
-    try {
-      tempZk.close();
-    } catch(InterruptedException e) {
-      LOG.warn(e);
-    }
-    zkConnectionState = ConnectionState.TERMINATED;
-  }
-
-  private void reset() {
-    state = State.INIT;
-    terminateConnection();
-  }
-
-  private void becomeActive() {
-    assert wantToBeInElection;
-    if (state != State.ACTIVE) {
-      try {
-        Stat oldBreadcrumbStat = fenceOldActive();
-        writeBreadCrumbNode(oldBreadcrumbStat);
-      } catch (Exception e) {
-        LOG.warn("Exception handling the winning of election", e);
-        reJoinElection();
-        return;
-      }
-      LOG.debug("Becoming active");
-      state = State.ACTIVE;
-      appClient.becomeActive();
-    }
-  }
-
-  /**
-   * Write the "ActiveBreadCrumb" node, indicating that this node may need
-   * to be fenced on failover.
-   * @param oldBreadcrumbStat 
-   */
-  private void writeBreadCrumbNode(Stat oldBreadcrumbStat)
-      throws KeeperException, InterruptedException {
-    LOG.info("Writing znode " + zkBreadCrumbPath +
-        " to indicate that the local node is the most recent active...");
-    if (oldBreadcrumbStat == null) {
-      // No previous active, just create the node
-      createWithRetries(zkBreadCrumbPath, appData, zkAcl,
-        CreateMode.PERSISTENT);
-    } else {
-      // There was a previous active, update the node
-      setDataWithRetries(zkBreadCrumbPath, appData, oldBreadcrumbStat.getVersion());
-    }
-  }
-  
-  /**
-   * Try to delete the "ActiveBreadCrumb" node when gracefully giving up
-   * active status.
-   * If this fails, it will simply warn, since the graceful release behavior
-   * is only an optimization.
-   */
-  private void tryDeleteOwnBreadCrumbNode() {
-    assert state == State.ACTIVE;
-    LOG.info("Deleting bread-crumb of active node...");
-    
-    // Sanity check the data. This shouldn't be strictly necessary,
-    // but better to play it safe.
-    Stat stat = new Stat();
-    byte[] data = null;
-    try {
-      data = zkClient.getData(zkBreadCrumbPath, false, stat);
-
-      if (!Arrays.equals(data, appData)) {
-        throw new IllegalStateException(
-            "We thought we were active, but in fact " +
-            "the active znode had the wrong data: " +
-            StringUtils.byteToHexString(data) + " (stat=" + stat + ")");
-      }
-      
-      deleteWithRetries(zkBreadCrumbPath, stat.getVersion());
-    } catch (Exception e) {
-      LOG.warn("Unable to delete our own bread-crumb of being active at " +
-          zkBreadCrumbPath + ": " + e.getLocalizedMessage() + ". " +
-          "Expecting to be fenced by the next active.");
-    }
-  }
-
-  /**
-   * If there is a breadcrumb node indicating that another node may need
-   * fencing, try to fence that node.
-   * @return the Stat of the breadcrumb node that was read, or null
-   * if no breadcrumb node existed
-   */
-  private Stat fenceOldActive() throws InterruptedException, KeeperException {
-    final Stat stat = new Stat();
-    byte[] data;
-    LOG.info("Checking for any old active which needs to be fenced...");
-    try {
-      data = zkDoWithRetries(new ZKAction<byte[]>() {
-        @Override
-        public byte[] run() throws KeeperException, InterruptedException {
-          return zkClient.getData(zkBreadCrumbPath, false, stat);
-        }
-      });
-    } catch (KeeperException ke) {
-      if (isNodeDoesNotExist(ke.code())) {
-        LOG.info("No old node to fence");
-        return null;
-      }
-      
-      // If we failed to read for any other reason, then likely we lost
-      // our session, or we don't have permissions, etc. In any case,
-      // we probably shouldn't become active, and failing the whole
-      // thing is the best bet.
-      throw ke;
-    }
-
-    LOG.info("Old node exists: " + StringUtils.byteToHexString(data));
-    if (Arrays.equals(data, appData)) {
-      LOG.info("But old node has our own data, so don't need to fence it.");
-    } else {
-      appClient.fenceOldActive(data);
-    }
-    return stat;
-  }
-
-  private void becomeStandby() {
-    if (state != State.STANDBY) {
-      LOG.debug("Becoming standby");
-      state = State.STANDBY;
-      appClient.becomeStandby();
-    }
-  }
-
-  private void enterNeutralMode() {
-    if (state != State.NEUTRAL) {
-      LOG.debug("Entering neutral mode");
-      state = State.NEUTRAL;
-      appClient.enterNeutralMode();
-    }
-  }
-
-  private void createLockNodeAsync() {
-    zkClient.create(zkLockFilePath, appData, zkAcl, CreateMode.EPHEMERAL,
-        this, zkClient);
-  }
-
-  private void monitorLockNodeAsync() {
-    zkClient.exists(zkLockFilePath, 
-        new WatcherWithClientRef(zkClient), this,
-        zkClient);
-  }
-
-  private String createWithRetries(final String path, final byte[] data,
-      final List<ACL> acl, final CreateMode mode)
-      throws InterruptedException, KeeperException {
-    return zkDoWithRetries(new ZKAction<String>() {
-      public String run() throws KeeperException, InterruptedException {
-        return zkClient.create(path, data, acl, mode);
-      }
-    });
-  }
-
-  private Stat setDataWithRetries(final String path, final byte[] data,
-      final int version) throws InterruptedException, KeeperException {
-    return zkDoWithRetries(new ZKAction<Stat>() {
-      public Stat run() throws KeeperException, InterruptedException {
-        return zkClient.setData(path, data, version);
-      }
-    });
-  }
-  
-  private void deleteWithRetries(final String path, final int version)
-      throws KeeperException, InterruptedException {
-    zkDoWithRetries(new ZKAction<Void>() {
-      public Void run() throws KeeperException, InterruptedException {
-        zkClient.delete(path, version);
-        return null;
-      }
-    });
-  }
-
-  private static <T> T zkDoWithRetries(ZKAction<T> action)
-      throws KeeperException, InterruptedException {
-    int retry = 0;
-    while (true) {
-      try {
-        return action.run();
-      } catch (KeeperException ke) {
-        if (shouldRetry(ke.code()) && ++retry < NUM_RETRIES) {
-          continue;
-        }
-        throw ke;
-      }
-    }
-  }
-
-  private interface ZKAction<T> {
-    T run() throws KeeperException, InterruptedException; 
-  }
-  
-  /**
-   * The callbacks and watchers pass a reference to the ZK client
-   * which made the original call. We don't want to take action
-   * based on any callbacks from prior clients after we quit
-   * the election.
-   * @param ctx the ZK client passed into the watcher
-   * @return true if it matches the current client
-   */
-  private synchronized boolean isStaleClient(Object ctx) {
-    Preconditions.checkNotNull(ctx);
-    if (zkClient != (ZooKeeper)ctx) {
-      LOG.warn("Ignoring stale result from old client with sessionId " +
-          String.format("0x%08x", ((ZooKeeper)ctx).getSessionId()));
-      return true;
-    }
-    return false;
-  }
-
-  /**
-   * Watcher implementation which keeps a reference around to the
-   * original ZK connection, and passes it back along with any
-   * events.
-   */
-  private final class WatcherWithClientRef implements Watcher {
-    private final ZooKeeper zk;
-
-    private WatcherWithClientRef(ZooKeeper zk) {
-      this.zk = zk;
-    }
-
-    @Override
-    public void process(WatchedEvent event) {
-      ActiveStandbyElector.this.processWatchEvent(
-          zk, event);
-    }
-  }
-
-  private static boolean isSuccess(Code code) {
-    return (code == Code.OK);
-  }
-
-  private static boolean isNodeExists(Code code) {
-    return (code == Code.NODEEXISTS);
-  }
-
-  private static boolean isNodeDoesNotExist(Code code) {
-    return (code == Code.NONODE);
-  }
-  
-  private static boolean isSessionExpired(Code code) {
-    return (code == Code.SESSIONEXPIRED);
-  }
-
-  private static boolean shouldRetry(Code code) {
-    switch (code) {
-    case CONNECTIONLOSS:
-    case OPERATIONTIMEOUT:
-      return true;
-    }
-    return false;
-  }
-
-}

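The elector above funnels every synchronous ZooKeeper call through zkDoWithRetries, which retries only on the transient CONNECTIONLOSS and OPERATIONTIMEOUT codes and rethrows everything else. A minimal standalone sketch of that retry pattern, assuming only the stock ZooKeeper client API (the class and method names below are illustrative, not part of the removed code):

    import java.util.List;
    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.KeeperException;
    import org.apache.zookeeper.KeeperException.Code;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.ACL;

    class ZkRetrySketch {
      private static final int NUM_RETRIES = 3; // same bound as the elector

      interface ZkAction<T> {
        T run() throws KeeperException, InterruptedException;
      }

      // Retry an action on transient connection errors only; any other
      // KeeperException is rethrown immediately.
      static <T> T doWithRetries(ZkAction<T> action)
          throws KeeperException, InterruptedException {
        int retry = 0;
        while (true) {
          try {
            return action.run();
          } catch (KeeperException ke) {
            Code code = ke.code();
            boolean isTransient = (code == Code.CONNECTIONLOSS
                || code == Code.OPERATIONTIMEOUT);
            if (isTransient && ++retry < NUM_RETRIES) {
              continue;
            }
            throw ke;
          }
        }
      }

      // Example: create an ephemeral lock znode with retries.
      static String createLock(final ZooKeeper zk, final String path,
          final byte[] data, final List<ACL> acl)
          throws KeeperException, InterruptedException {
        return doWithRetries(new ZkAction<String>() {
          public String run() throws KeeperException, InterruptedException {
            return zk.create(path, data, acl, CreateMode.EPHEMERAL);
          }
        });
      }
    }

Note that a retried create can race with its own earlier attempt having succeeded; that is why the elector's stat callback above re-checks the znode's ephemeral owner before declaring itself leader.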
+ 0 - 203
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FailoverController.java

@@ -1,203 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ipc.RPC;
-
-import com.google.common.base.Preconditions;
-
-/**
- * The FailoverController is responsible for electing an active service
- * on startup or when the current active is changing (e.g. due to failure),
- * monitoring the health of a service, and performing a failover when a
- * new active service is either manually selected by a user or elected.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class FailoverController {
-
-  private static final Log LOG = LogFactory.getLog(FailoverController.class);
-
-  private static final int GRACEFUL_FENCE_TIMEOUT = 5000;
-
-  /**
-   * Perform pre-failover checks on the given service we plan to
-   * fail over to, e.g. to prevent failing over to a service that is
-   * inaccessible, already active, or not healthy.
-   *
-   * An option to ignore the target if it claims it is not ready to
-   * become active is provided in case performing a failover will
-   * allow it to become active, e.g. because it triggers a log roll
-   * so the standby can learn about new blocks and leave safemode.
-   *
-   * @param target service to make active
-   * @param forceActive ignore the target if it reports that it is not ready
-   * @throws FailoverFailedException if we should avoid failover
-   */
-  private static void preFailoverChecks(HAServiceTarget target,
-                                        boolean forceActive)
-      throws FailoverFailedException {
-    HAServiceStatus toSvcStatus;
-    HAServiceProtocol toSvc;
-
-    try {
-      toSvc = target.getProxy();
-      toSvcStatus = toSvc.getServiceStatus();
-    } catch (IOException e) {
-      String msg = "Unable to get service state for " + target;
-      LOG.error(msg, e);
-      throw new FailoverFailedException(msg, e);
-    }
-
-    if (!toSvcStatus.getState().equals(HAServiceState.STANDBY)) {
-      throw new FailoverFailedException(
-          "Can't failover to an active service");
-    }
-    
-    if (!toSvcStatus.isReadyToBecomeActive()) {
-      String notReadyReason = toSvcStatus.getNotReadyReason();
-      if (!forceActive) {
-        throw new FailoverFailedException(
-            target + " is not ready to become active: " +
-            notReadyReason);
-      } else {
-        LOG.warn("Service is not ready to become active, but forcing: " +
-            notReadyReason);
-      }
-    }
-
-    try {
-      HAServiceProtocolHelper.monitorHealth(toSvc);
-    } catch (HealthCheckFailedException hce) {
-      throw new FailoverFailedException(
-          "Can't failover to an unhealthy service", hce);
-    } catch (IOException e) {
-      throw new FailoverFailedException(
-          "Got an IO exception", e);
-    }
-  }
-  
-  
-  /**
-   * Try to get the HA state of the node at the given address. This
-   * function is guaranteed to be "quick" -- i.e. it has a short timeout
-   * and no retries. Its only purpose is to avoid fencing a node that
-   * has already restarted.
-   */
-  static boolean tryGracefulFence(Configuration conf,
-      HAServiceTarget svc) {
-    HAServiceProtocol proxy = null;
-    try {
-      proxy = svc.getProxy(conf, GRACEFUL_FENCE_TIMEOUT);
-      proxy.transitionToStandby();
-      return true;
-    } catch (ServiceFailedException sfe) {
-      LOG.warn("Unable to gracefully make " + svc + " standby (" +
-          sfe.getMessage() + ")");
-    } catch (IOException ioe) {
-      LOG.warn("Unable to gracefully make " + svc +
-          " standby (unable to connect)", ioe);
-    } finally {
-      if (proxy != null) {
-        RPC.stopProxy(proxy);
-      }
-    }
-    return false;
-  }
-  
-  /**
-   * Failover from service 1 to service 2. If the failover fails
-   * then try to failback.
-   *
-   * @param fromSvc currently active service
-   * @param toSvc service to make active
-   * @param forceFence to fence fromSvc even if not strictly necessary
-   * @param forceActive try to make toSvc active even if it is not ready
-   * @throws FailoverFailedException if the failover fails
-   */
-  public static void failover(HAServiceTarget fromSvc,
-                              HAServiceTarget toSvc,
-                              boolean forceFence,
-                              boolean forceActive)
-      throws FailoverFailedException {
-    Preconditions.checkArgument(fromSvc.getFencer() != null,
-        "failover requires a fencer");
-    preFailoverChecks(toSvc, forceActive);
-
-    // Try to make fromSvc standby
-    boolean tryFence = true;
-    
-    if (tryGracefulFence(new Configuration(), fromSvc)) {
-      tryFence = forceFence;
-    }
-
-    // Fence fromSvc if it's required or forced by the user
-    if (tryFence) {
-      if (!fromSvc.getFencer().fence(fromSvc)) {
-        throw new FailoverFailedException("Unable to fence " +
-            fromSvc + ". Fencing failed.");
-      }
-    }
-
-    // Try to make toSvc active
-    boolean failed = false;
-    Throwable cause = null;
-    try {
-      HAServiceProtocolHelper.transitionToActive(toSvc.getProxy());
-    } catch (ServiceFailedException sfe) {
-      LOG.error("Unable to make " + toSvc + " active (" +
-          sfe.getMessage() + "). Failing back.");
-      failed = true;
-      cause = sfe;
-    } catch (IOException ioe) {
-      LOG.error("Unable to make " + toSvc +
-          " active (unable to connect). Failing back.", ioe);
-      failed = true;
-      cause = ioe;
-    }
-
-    // We failed to make toSvc active
-    if (failed) {
-      String msg = "Unable to failover to " + toSvc;
-      // Only try to failback if we didn't fence fromSvc
-      if (!tryFence) {
-        try {
-          // Unconditionally fence toSvc in case it is still trying to
-          // become active, eg we timed out waiting for its response.
-          // Unconditionally force fromSvc to become active since it
-          // was previously active when we initiated failover.
-          failover(toSvc, fromSvc, true, true);
-        } catch (FailoverFailedException ffe) {
-          msg += ". Failback to " + fromSvc +
-            " failed (" + ffe.getMessage() + ")";
-          LOG.fatal(msg);
-        }
-      }
-      throw new FailoverFailedException(msg, cause);
-    }
-  }
-}

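The whole sequence is driven through the single static failover() entry point above. A hedged usage sketch, assuming the two targets have already been resolved by whatever mechanism the calling admin tool provides:

    import org.apache.hadoop.ha.FailoverController;
    import org.apache.hadoop.ha.FailoverFailedException;
    import org.apache.hadoop.ha.HAServiceTarget;

    class FailoverSketch {
      static void swap(HAServiceTarget active, HAServiceTarget standby) {
        try {
          // Pre-flight checks on the standby, then a short-timeout graceful
          // transitionToStandby() on the old active; fencing runs only if
          // the graceful attempt fails (or if forceFence were true).
          FailoverController.failover(active, standby,
              false /* forceFence */, false /* forceActive */);
          System.out.println("Failover complete");
        } catch (FailoverFailedException ffe) {
          // failover() already attempted a failback if it had not fenced.
          System.err.println("Failover failed: " + ffe.getMessage());
        }
      }
    }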
+ 0 - 65
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/FenceMethod.java

@@ -1,65 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configurable;
-
-/**
- * A fencing method is a method by which one node can forcibly prevent
- * another node from making continued progress. This might be implemented
- * by killing a process on the other node, by denying the other node's
- * access to shared storage, or by accessing a PDU to cut the other node's
- * power.
- * <p>
- * Since these methods are often vendor- or device-specific, operators
- * may implement this interface in order to achieve fencing.
- * <p>
- * Fencing is configured by the operator as an ordered list of methods to
- * attempt. Each method will be tried in turn, and the next in the list
- * will only be attempted if the previous one fails. See {@link NodeFencer}
- * for more information.
- * <p>
- * If an implementation also implements {@link Configurable} then its
- * <code>setConf</code> method will be called upon instantiation.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Unstable
-public interface FenceMethod {
-  /**
-   * Verify that the given fencing method's arguments are valid.
-   * @param args the arguments provided in the configuration. This may
-   *        be null if the operator did not configure any arguments.
-   * @throws BadFencingConfigurationException if the arguments are invalid
-   */
-  public void checkArgs(String args) throws BadFencingConfigurationException;
-  
-  /**
-   * Attempt to fence the target node.
-   * @param target the target node to fence
-   * @param args the configured arguments, which were checked at startup by
-   *             {@link #checkArgs(String)}
-   * @return true if fencing was successful, false if unsuccessful or
-   *              indeterminate
-   * @throws BadFencingConfigurationException if the configuration was
-   *         determined to be invalid only at runtime
-   */
-  public boolean tryFence(HAServiceTarget target, String args)
-    throws BadFencingConfigurationException;
-}

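Operators hook in vendor-specific fencing by implementing this interface and listing the class in the fencing-methods configuration. A minimal hypothetical implementation (the class and its logging behavior are invented for illustration; a real method would kill a process or cut off shared storage):

    import org.apache.hadoop.ha.BadFencingConfigurationException;
    import org.apache.hadoop.ha.FenceMethod;
    import org.apache.hadoop.ha.HAServiceTarget;

    public class LoggingFenceMethod implements FenceMethod {
      @Override
      public void checkArgs(String args) throws BadFencingConfigurationException {
        // Validated once at startup: this method takes no arguments.
        if (args != null && !args.isEmpty()) {
          throw new BadFencingConfigurationException(
              "LoggingFenceMethod takes no arguments, got: " + args);
        }
      }

      @Override
      public boolean tryFence(HAServiceTarget target, String args)
          throws BadFencingConfigurationException {
        System.err.println("Would fence " + target.getAddress());
        // Returning true tells NodeFencer to stop trying further methods.
        return true;
      }
    }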
+ 0 - 296
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAAdmin.java

@@ -1,296 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.io.PrintStream;
-import java.util.Map;
-
-import org.apache.commons.cli.Options;
-import org.apache.commons.cli.CommandLine;
-import org.apache.commons.cli.CommandLineParser;
-import org.apache.commons.cli.GnuParser;
-import org.apache.commons.cli.ParseException;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-
-import com.google.common.collect.ImmutableMap;
-
-/**
- * A command-line tool for making calls to the HAServiceProtocol.
- * For example, this can be used to force a service into standby or active
- * mode, or to trigger a health check.
- */
-@InterfaceAudience.Private
-
-public abstract class HAAdmin extends Configured implements Tool {
-  
-  private static final String FORCEFENCE  = "forcefence";
-  private static final String FORCEACTIVE = "forceactive";
-  private static final Log LOG = LogFactory.getLog(HAAdmin.class);
-
-  private static Map<String, UsageInfo> USAGE =
-    ImmutableMap.<String, UsageInfo>builder()
-    .put("-transitionToActive",
-        new UsageInfo("<serviceId>", "Transitions the service into Active state"))
-    .put("-transitionToStandby",
-        new UsageInfo("<serviceId>", "Transitions the service into Standby state"))
-    .put("-failover",
-        new UsageInfo("[--"+FORCEFENCE+"] [--"+FORCEACTIVE+"] <serviceId> <serviceId>",
-            "Failover from the first service to the second.\n" +
-            "Unconditionally fence services if the "+FORCEFENCE+" option is used.\n" +
-            "Try to failover to the target service even if it is not ready if the " + 
-            FORCEACTIVE + " option is used."))
-    .put("-getServiceState",
-        new UsageInfo("<serviceId>", "Returns the state of the service"))
-    .put("-checkHealth",
-        new UsageInfo("<serviceId>",
-            "Requests that the service perform a health check.\n" + 
-            "The HAAdmin tool will exit with a non-zero exit code\n" +
-            "if the check fails."))
-    .put("-help",
-        new UsageInfo("<command>", "Displays help on the specified command"))
-    .build();
-
-  /** Output stream for errors, for use in tests */
-  protected PrintStream errOut = System.err;
-  PrintStream out = System.out;
-
-  protected abstract HAServiceTarget resolveTarget(String string);
-
-  protected String getUsageString() {
-    return "Usage: HAAdmin";
-  }
-
-  protected void printUsage(PrintStream errOut) {
-    errOut.println(getUsageString());
-    for (Map.Entry<String, UsageInfo> e : USAGE.entrySet()) {
-      String cmd = e.getKey();
-      UsageInfo usage = e.getValue();
-      
-      errOut.println("    [" + cmd + " " + usage.args + "]"); 
-    }
-    errOut.println();
-    ToolRunner.printGenericCommandUsage(errOut);    
-  }
-  
-  private static void printUsage(PrintStream errOut, String cmd) {
-    UsageInfo usage = USAGE.get(cmd);
-    if (usage == null) {
-      throw new RuntimeException("No usage for cmd " + cmd);
-    }
-    errOut.println("Usage: HAAdmin [" + cmd + " " + usage.args + "]");
-  }
-
-  private int transitionToActive(final String[] argv)
-      throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
-      errOut.println("transitionToActive: incorrect number of arguments");
-      printUsage(errOut, "-transitionToActive");
-      return -1;
-    }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
-    HAServiceProtocolHelper.transitionToActive(proto);
-    return 0;
-  }
-
-  private int transitionToStandby(final String[] argv)
-      throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
-      errOut.println("transitionToStandby: incorrect number of arguments");
-      printUsage(errOut, "-transitionToStandby");
-      return -1;
-    }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
-    HAServiceProtocolHelper.transitionToStandby(proto);
-    return 0;
-  }
-
-  private int failover(final String[] argv)
-      throws IOException, ServiceFailedException {
-    boolean forceFence = false;
-    boolean forceActive = false;
-
-    Options failoverOpts = new Options();
-    // "-failover" isn't really an option but we need to add
-    // it to appease CommandLineParser
-    failoverOpts.addOption("failover", false, "failover");
-    failoverOpts.addOption(FORCEFENCE, false, "force fencing");
-    failoverOpts.addOption(FORCEACTIVE, false, "force failover");
-
-    CommandLineParser parser = new GnuParser();
-    CommandLine cmd;
-
-    try {
-      cmd = parser.parse(failoverOpts, argv);
-      forceFence = cmd.hasOption(FORCEFENCE);
-      forceActive = cmd.hasOption(FORCEACTIVE);
-    } catch (ParseException pe) {
-      errOut.println("failover: incorrect arguments");
-      printUsage(errOut, "-failover");
-      return -1;
-    }
-    
-    int numOpts = cmd.getOptions() == null ? 0 : cmd.getOptions().length;
-    final String[] args = cmd.getArgs();
-
-    if (numOpts > 2 || args.length != 2) {
-      errOut.println("failover: incorrect arguments");
-      printUsage(errOut, "-failover");
-      return -1;
-    }
-
-    HAServiceTarget fromNode = resolveTarget(args[0]);
-    HAServiceTarget toNode = resolveTarget(args[1]);
-    
-    try {
-      FailoverController.failover(fromNode, toNode,
-          forceFence, forceActive); 
-      out.println("Failover from "+args[0]+" to "+args[1]+" successful");
-    } catch (FailoverFailedException ffe) {
-      errOut.println("Failover failed: " + ffe.getLocalizedMessage());
-      return -1;
-    }
-    return 0;
-  }
-
-  private int checkHealth(final String[] argv)
-      throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
-      errOut.println("checkHealth: incorrect number of arguments");
-      printUsage(errOut, "-checkHealth");
-      return -1;
-    }
-    
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
-    try {
-      HAServiceProtocolHelper.monitorHealth(proto);
-    } catch (HealthCheckFailedException e) {
-      errOut.println("Health check failed: " + e.getLocalizedMessage());
-      return -1;
-    }
-    return 0;
-  }
-
-  private int getServiceState(final String[] argv)
-      throws IOException, ServiceFailedException {
-    if (argv.length != 2) {
-      errOut.println("getServiceState: incorrect number of arguments");
-      printUsage(errOut, "-getServiceState");
-      return -1;
-    }
-
-    HAServiceProtocol proto = resolveTarget(argv[1]).getProxy();
-    out.println(proto.getServiceStatus().getState());
-    return 0;
-  }
-
-  /**
-   * Return the serviceId as-is, assuming it was
-   * given as a service address of the form host:ipcport.
-   */
-  protected String getServiceAddr(String serviceId) {
-    return serviceId;
-  }
-
-  @Override
-  public int run(String[] argv) throws Exception {
-    try {
-      return runCmd(argv);
-    } catch (IllegalArgumentException iae) {
-      errOut.println("Illegal argument: " + iae.getLocalizedMessage());
-      return -1;
-    } catch (IOException ioe) {
-      errOut.println("Operation failed: " + ioe.getLocalizedMessage());
-      if (LOG.isDebugEnabled()) {
-        LOG.debug("Operation failed", ioe);
-      }
-      return -1;
-    }
-  }
-  
-  protected int runCmd(String[] argv) throws Exception {
-    if (argv.length < 1) {
-      printUsage(errOut);
-      return -1;
-    }
-
-    String cmd = argv[0];
-
-    if (!cmd.startsWith("-")) {
-      errOut.println("Bad command '" + cmd + "': expected command starting with '-'");
-      printUsage(errOut);
-      return -1;
-    }
-
-    if ("-transitionToActive".equals(cmd)) {
-      return transitionToActive(argv);
-    } else if ("-transitionToStandby".equals(cmd)) {
-      return transitionToStandby(argv);
-    } else if ("-failover".equals(cmd)) {
-      return failover(argv);
-    } else if ("-getServiceState".equals(cmd)) {
-      return getServiceState(argv);
-    } else if ("-checkHealth".equals(cmd)) {
-      return checkHealth(argv);
-    } else if ("-help".equals(cmd)) {
-      return help(argv);
-    } else {
-      errOut.println(cmd.substring(1) + ": Unknown command");
-      printUsage(errOut);
-      return -1;
-    } 
-  }
-  
-  private int help(String[] argv) {
-    if (argv.length != 2) {
-      printUsage(errOut, "-help");
-      return -1;
-    }
-    String cmd = argv[1];
-    if (!cmd.startsWith("-")) {
-      cmd = "-" + cmd;
-    }
-    UsageInfo usageInfo = USAGE.get(cmd);
-    if (usageInfo == null) {
-      errOut.println(cmd + ": Unknown command");
-      printUsage(errOut);
-      return -1;
-    }
-    
-    errOut.println(cmd + " [" + usageInfo.args + "]: " + usageInfo.help);
-    return 0;
-  }
-  
-  private static class UsageInfo {
-    private final String args;
-    private final String help;
-    
-    public UsageInfo(String args, String help) {
-      this.args = args;
-      this.help = help;
-    }
-  }
-}

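Since HAAdmin is abstract, each service ships a thin subclass that resolves service identifiers into targets. A sketch, assuming serviceId is a host:ipcport pair (matching getServiceAddr above) and reusing the hypothetical FixedAddressTarget sketched after the HAServiceTarget section below:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ha.BadFencingConfigurationException;
    import org.apache.hadoop.ha.HAAdmin;
    import org.apache.hadoop.ha.HAServiceTarget;
    import org.apache.hadoop.ha.NodeFencer;
    import org.apache.hadoop.util.ToolRunner;

    public class MyHAAdmin extends HAAdmin {
      @Override
      protected HAServiceTarget resolveTarget(String serviceId) {
        String[] hostPort = serviceId.split(":");
        InetSocketAddress addr = new InetSocketAddress(
            hostPort[0], Integer.parseInt(hostPort[1]));
        try {
          // FixedAddressTarget is the sketch shown later in this commit.
          return new FixedAddressTarget(addr, NodeFencer.create(getConf()));
        } catch (BadFencingConfigurationException e) {
          throw new IllegalArgumentException("Bad fencing configuration", e);
        }
      }

      public static void main(String[] args) throws Exception {
        // e.g. args = { "-getServiceState", "host1:8020" }
        System.exit(ToolRunner.run(new Configuration(), new MyHAAdmin(), args));
      }
    }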
+ 0 - 129
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocol.java

@@ -1,129 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.security.AccessControlException;
-import org.apache.hadoop.security.KerberosInfo;
-
-import java.io.IOException;
-
-/**
- * Protocol interface that provides High Availability related primitives to
- * monitor and fail over the service.
- * 
- * This interface could be used by HA frameworks to manage the service.
- */
-@KerberosInfo(
-    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public interface HAServiceProtocol {
-  /**
-   * Initial version of the protocol
-   */
-  public static final long versionID = 1L;
-
-  /**
-   * An HA service may be in active or standby state. During
-   * startup, it is in an unknown INITIALIZING state.
-   */
-  public enum HAServiceState {
-    INITIALIZING("initializing"),
-    ACTIVE("active"),
-    STANDBY("standby");
-
-    private String name;
-
-    HAServiceState(String name) {
-      this.name = name;
-    }
-
-    public String toString() {
-      return name;
-    }
-  }
-
-  /**
-   * Monitor the health of the service. This is called periodically by the
-   * HA framework to monitor the health of the service.
-   * 
-   * Service is expected to perform checks to ensure it is functional.
-   * If the service is not healthy due to failure or partial failure,
-   * it is expected to throw {@link HealthCheckFailedException}.
-   * The definition of service not healthy is left to the service.
-   * 
-   * Note that when the health check of an active service fails,
-   * a failover to standby may be performed.
-   * 
-   * @throws HealthCheckFailedException
-   *           if the health check of a service fails.
-   * @throws AccessControlException
-   *           if access is denied.
-   * @throws IOException
-   *           if other errors happen
-   */
-  public void monitorHealth() throws HealthCheckFailedException,
-                                     AccessControlException,
-                                     IOException;
-
-  /**
-   * Request the service to transition to the active state. This is a
-   * no-op if the service is already in the active state.
-   * 
-   * @throws ServiceFailedException
-   *           if transition from standby to active fails.
-   * @throws AccessControlException
-   *           if access is denied.
-   * @throws IOException
-   *           if other errors happen
-   */
-  public void transitionToActive() throws ServiceFailedException,
-                                          AccessControlException,
-                                          IOException;
-
-  /**
-   * Request the service to transition to the standby state. This is a
-   * no-op if the service is already in the standby state.
-   * 
-   * @throws ServiceFailedException
-   *           if transition from active to standby fails.
-   * @throws AccessControlException
-   *           if access is denied.
-   * @throws IOException
-   *           if other errors happen
-   */
-  public void transitionToStandby() throws ServiceFailedException,
-                                           AccessControlException,
-                                           IOException;
-
-  /**
-   * Return the current status of the service. The status indicates
-   * the current <em>state</em> (e.g. ACTIVE/STANDBY) as well as
-   * some additional information. See {@link HAServiceStatus}.
-   * 
-   * @throws AccessControlException
-   *           if access is denied.
-   * @throws IOException
-   *           if other errors happen
-   */
-  public HAServiceStatus getServiceStatus() throws AccessControlException,
-                                                   IOException;
-}

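A service participates in HA by implementing the four methods above. A minimal in-memory sketch with no RPC wiring and a health check that always passes (both simplifications are assumptions of the sketch, not how a real service behaves):

    import java.io.IOException;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceStatus;
    import org.apache.hadoop.ha.HealthCheckFailedException;
    import org.apache.hadoop.ha.ServiceFailedException;

    public class InMemoryHAService implements HAServiceProtocol {
      private HAServiceState state = HAServiceState.INITIALIZING;

      @Override
      public synchronized void monitorHealth() throws HealthCheckFailedException {
        // A real service would check its own invariants here and throw
        // HealthCheckFailedException when they do not hold.
      }

      @Override
      public synchronized void transitionToActive() throws ServiceFailedException {
        state = HAServiceState.ACTIVE; // no-op if already active
      }

      @Override
      public synchronized void transitionToStandby() throws ServiceFailedException {
        state = HAServiceState.STANDBY; // no-op if already standby
      }

      @Override
      public synchronized HAServiceStatus getServiceStatus() throws IOException {
        HAServiceStatus status = new HAServiceStatus(state);
        return state == HAServiceState.STANDBY
            ? status.setReadyToBecomeActive()
            : status.setNotReadyToBecomeActive("not in standby state");
      }
    }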
+ 0 - 59
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceProtocolHelper.java

@@ -1,59 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.RemoteException;
-
-/**
- * Helper for making {@link HAServiceProtocol} RPC calls. This helper
- * unwraps the {@link RemoteException} to specific exceptions.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class HAServiceProtocolHelper {
-  public static void monitorHealth(HAServiceProtocol svc)
-      throws IOException {
-    try {
-      svc.monitorHealth();
-    } catch (RemoteException e) {
-      throw e.unwrapRemoteException(HealthCheckFailedException.class);
-    }
-  }
-
-  public static void transitionToActive(HAServiceProtocol svc)
-      throws IOException {
-    try {
-      svc.transitionToActive();
-    } catch (RemoteException e) {
-      throw e.unwrapRemoteException(ServiceFailedException.class);
-    }
-  }
-
-  public static void transitionToStandby(HAServiceProtocol svc)
-      throws IOException {
-    try {
-      svc.transitionToStandby();
-    } catch (RemoteException e) {
-      throw e.unwrapRemoteException(ServiceFailedException.class);
-    }
-  }
-}

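Over RPC the server-side exception arrives wrapped in a RemoteException, so a caller that wants to catch the specific type goes through the helper; for example (how the proxy is obtained is assumed away here):

    import java.io.IOException;
    import org.apache.hadoop.ha.HAServiceProtocol;
    import org.apache.hadoop.ha.HAServiceProtocolHelper;
    import org.apache.hadoop.ha.ServiceFailedException;

    class TransitionSketch {
      static void makeActive(HAServiceProtocol proxy) throws IOException {
        try {
          // Without the helper, a refused transition would surface as an
          // opaque RemoteException rather than ServiceFailedException.
          HAServiceProtocolHelper.transitionToActive(proxy);
        } catch (ServiceFailedException sfe) {
          System.err.println("Transition refused: " + sfe.getMessage());
        }
      }
    }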
+ 0 - 56
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceStatus.java

@@ -1,56 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-
-@InterfaceAudience.Private
-public class HAServiceStatus {
-  private HAServiceState state;
-  private boolean readyToBecomeActive;
-  private String notReadyReason;
-  
-  public HAServiceStatus(HAServiceState state) {
-    this.state = state;
-  }
-
-  public HAServiceState getState() {
-    return state;
-  }
-
-  public HAServiceStatus setReadyToBecomeActive() {
-    this.readyToBecomeActive = true;
-    this.notReadyReason = null;
-    return this;
-  }
-  
-  public HAServiceStatus setNotReadyToBecomeActive(String reason) {
-    this.readyToBecomeActive = false;
-    this.notReadyReason = reason;
-    return this;
-  }
-
-  public boolean isReadyToBecomeActive() {
-    return readyToBecomeActive;
-  }
-
-  public String getNotReadyReason() {
-    return notReadyReason;
-  }
-}

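Because the setters return this, a status can be assembled in a single expression; for example:

    import org.apache.hadoop.ha.HAServiceStatus;
    import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;

    class StatusSketch {
      // A standby that is ready to take over, built in one chained call.
      static HAServiceStatus standbyReady() {
        return new HAServiceStatus(HAServiceState.STANDBY)
            .setReadyToBecomeActive();
      }
    }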
+ 0 - 78
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HAServiceTarget.java

@@ -1,78 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import javax.net.SocketFactory;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;
-import org.apache.hadoop.net.NetUtils;
-
-/**
- * Represents a target of the client-side HA administration commands.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public abstract class HAServiceTarget {
-
-  /**
-   * @return the IPC address of the target node.
-   */
-  public abstract InetSocketAddress getAddress();
-
-  /**
-   * @return a Fencer implementation configured for this target node
-   */
-  public abstract NodeFencer getFencer();
-  
-  /**
-   * @throws BadFencingConfigurationException if the fencing configuration
-   * appears to be invalid. This is divorced from the above
-   * {@link #getFencer()} method so that the configuration can be checked
-   * during the pre-flight phase of failover.
-   */
-  public abstract void checkFencingConfigured()
-      throws BadFencingConfigurationException;
-  
-  /**
-   * @return a proxy to connect to the target HA Service.
-   */
-  public HAServiceProtocol getProxy(Configuration conf, int timeoutMs)
-      throws IOException {
-    Configuration confCopy = new Configuration(conf);
-    // Lower the timeout so we quickly fail to connect
-    confCopy.setInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 1);
-    SocketFactory factory = NetUtils.getDefaultSocketFactory(confCopy);
-    return new HAServiceProtocolClientSideTranslatorPB(
-        getAddress(),
-        confCopy, factory, timeoutMs);
-  }
-
-  /**
-   * @return a proxy to connect to the target HA Service.
-   */
-  public final HAServiceProtocol getProxy() throws IOException {
-    return getProxy(new Configuration(), 0); // default conf, timeout
-  }
-}

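A concrete target only has to supply an address, a fencer, and a fencing-config check; getProxy() comes for free from the base class. A hypothetical fixed-address sketch:

    import java.net.InetSocketAddress;
    import org.apache.hadoop.ha.BadFencingConfigurationException;
    import org.apache.hadoop.ha.HAServiceTarget;
    import org.apache.hadoop.ha.NodeFencer;

    public class FixedAddressTarget extends HAServiceTarget {
      private final InetSocketAddress addr;
      private final NodeFencer fencer; // may be null if none configured

      public FixedAddressTarget(InetSocketAddress addr, NodeFencer fencer) {
        this.addr = addr;
        this.fencer = fencer;
      }

      @Override
      public InetSocketAddress getAddress() { return addr; }

      @Override
      public NodeFencer getFencer() { return fencer; }

      @Override
      public void checkFencingConfigured()
          throws BadFencingConfigurationException {
        if (fencer == null) {
          throw new BadFencingConfigurationException(
              "No fencing methods configured for " + addr);
        }
      }
    }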
+ 0 - 40
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthCheckFailedException.java

@@ -1,40 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * Exception thrown to indicate that health check of a service failed.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class HealthCheckFailedException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public HealthCheckFailedException(final String message) {
-    super(message);
-  }
-  
-  public HealthCheckFailedException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}

+ 0 - 293
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/HealthMonitor.java

@@ -1,293 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.util.Collections;
-import java.util.LinkedList;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configuration;
-import static org.apache.hadoop.fs.CommonConfigurationKeys.*;
-import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.HAServiceProtocol.HAServiceState;
-import org.apache.hadoop.ha.HealthCheckFailedException;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.util.Daemon;
-
-import com.google.common.base.Preconditions;
-
-/**
- * This class is a daemon which runs in a loop, periodically heartbeating
- * with an HA service. It is responsible for keeping track of that service's
- * health and exposing callbacks to the failover controller when the health
- * status changes.
- * 
- * Classes which need callbacks should implement the {@link Callback}
- * interface.
- */
-class HealthMonitor {
-  private static final Log LOG = LogFactory.getLog(
-      HealthMonitor.class);
-
-  private Daemon daemon;
-  private long connectRetryInterval;
-  private long checkIntervalMillis;
-  private long sleepAfterDisconnectMillis;
-
-  private int rpcTimeout;
-
-  private volatile boolean shouldRun = true;
-
-  /** The connected proxy */
-  private HAServiceProtocol proxy;
-
-  /** The HA service to monitor */
-  private final HAServiceTarget targetToMonitor;
-
-  private final Configuration conf;
-  
-  private State state = State.INITIALIZING;
-
-  /**
-   * Listeners for state changes
-   */
-  private List<Callback> callbacks = Collections.synchronizedList(
-      new LinkedList<Callback>());
-
-  private HAServiceStatus lastServiceState = new HAServiceStatus(
-      HAServiceState.INITIALIZING);
-  
-  enum State {
-    /**
-     * The health monitor is still starting up.
-     */
-    INITIALIZING,
-
-    /**
-     * The service is not responding to health check RPCs.
-     */
-    SERVICE_NOT_RESPONDING,
-
-    /**
-     * The service is connected and healthy.
-     */
-    SERVICE_HEALTHY,
-    
-    /**
-     * The service is running but unhealthy.
-     */
-    SERVICE_UNHEALTHY,
-    
-    /**
-     * The health monitor itself failed unrecoverably and can
-     * no longer provide accurate information.
-     */
-    HEALTH_MONITOR_FAILED;
-  }
-
-
-  HealthMonitor(Configuration conf, HAServiceTarget target) {
-    this.targetToMonitor = target;
-    this.conf = conf;
-    
-    this.sleepAfterDisconnectMillis = conf.getLong(
-        HA_HM_SLEEP_AFTER_DISCONNECT_KEY,
-        HA_HM_SLEEP_AFTER_DISCONNECT_DEFAULT);
-    this.checkIntervalMillis = conf.getLong(
-        HA_HM_CHECK_INTERVAL_KEY,
-        HA_HM_CHECK_INTERVAL_DEFAULT);
-    this.connectRetryInterval = conf.getLong(
-        HA_HM_CONNECT_RETRY_INTERVAL_KEY,
-        HA_HM_CONNECT_RETRY_INTERVAL_DEFAULT);
-    this.rpcTimeout = conf.getInt(
-        HA_HM_RPC_TIMEOUT_KEY,
-        HA_HM_RPC_TIMEOUT_DEFAULT);
-    
-    this.daemon = new MonitorDaemon();
-  }
-  
-  public void addCallback(Callback cb) {
-    this.callbacks.add(cb);
-  }
-  
-  public void removeCallback(Callback cb) {
-    callbacks.remove(cb);
-  }
-  
-  public void shutdown() {
-    LOG.info("Stopping HealthMonitor thread");
-    shouldRun = false;
-    daemon.interrupt();
-  }
-
-  /**
-   * @return the current proxy object to the underlying service.
-   * Note that this may return null in the case that the service
-   * is not responding. Also note that, even if the last indicated
-   * state is healthy, the service may have gone down in the meantime.
-   */
-  public synchronized HAServiceProtocol getProxy() {
-    return proxy;
-  }
-  
-  private void loopUntilConnected() throws InterruptedException {
-    tryConnect();
-    while (proxy == null) {
-      Thread.sleep(connectRetryInterval);
-      tryConnect();
-    }
-    assert proxy != null;
-  }
-
-  private void tryConnect() {
-    Preconditions.checkState(proxy == null);
-    
-    try {
-      synchronized (this) {
-        proxy = createProxy();
-      }
-    } catch (IOException e) {
-      LOG.warn("Could not connect to local service at " + targetToMonitor +
-          ": " + e.getMessage());
-      proxy = null;
-      enterState(State.SERVICE_NOT_RESPONDING);
-    }
-  }
-  
-  /**
-   * Connect to the service to be monitored. Stubbed out for easier testing.
-   */
-  protected HAServiceProtocol createProxy() throws IOException {
-    return targetToMonitor.getProxy(conf, rpcTimeout);
-  }
-
-  private void doHealthChecks() throws InterruptedException {
-    while (shouldRun) {
-      HAServiceStatus status = null;
-      boolean healthy = false;
-      try {
-        status = proxy.getServiceStatus();
-        proxy.monitorHealth();
-        healthy = true;
-      } catch (HealthCheckFailedException e) {
-        LOG.warn("Service health check failed for " + targetToMonitor
-            + ": " + e.getMessage());
-        enterState(State.SERVICE_UNHEALTHY);
-      } catch (Throwable t) {
-        LOG.warn("Transport-level exception trying to monitor health of " +
-            targetToMonitor + ": " + t.getLocalizedMessage());
-        RPC.stopProxy(proxy);
-        proxy = null;
-        enterState(State.SERVICE_NOT_RESPONDING);
-        Thread.sleep(sleepAfterDisconnectMillis);
-        return;
-      }
-      
-      if (status != null) {
-        setLastServiceStatus(status);
-      }
-      if (healthy) {
-        enterState(State.SERVICE_HEALTHY);
-      }
-
-      Thread.sleep(checkIntervalMillis);
-    }
-  }
-  
-  private synchronized void setLastServiceStatus(HAServiceStatus status) {
-    this.lastServiceState = status;
-  }
-
-  private synchronized void enterState(State newState) {
-    if (newState != state) {
-      LOG.info("Entering state " + newState);
-      state = newState;
-      synchronized (callbacks) {
-        for (Callback cb : callbacks) {
-          cb.enteredState(newState);
-        }
-      }
-    }
-  }
-
-  synchronized State getHealthState() {
-    return state;
-  }
-  
-  synchronized HAServiceStatus getLastServiceStatus() {
-    return lastServiceState;
-  }
-  
-  boolean isAlive() {
-    return daemon.isAlive();
-  }
-
-  void join() throws InterruptedException {
-    daemon.join();
-  }
-
-  void start() {
-    daemon.start();
-  }
-
-  private class MonitorDaemon extends Daemon {
-    private MonitorDaemon() {
-      super();
-      setName("Health Monitor for " + targetToMonitor);
-      setUncaughtExceptionHandler(new UncaughtExceptionHandler() {
-        @Override
-        public void uncaughtException(Thread t, Throwable e) {
-          LOG.fatal("Health monitor failed", e);
-          enterState(HealthMonitor.State.HEALTH_MONITOR_FAILED);
-        }
-      });
-    }
-    
-    @Override
-    public void run() {
-      while (shouldRun) {
-        try { 
-          loopUntilConnected();
-          doHealthChecks();
-        } catch (InterruptedException ie) {
-          Preconditions.checkState(!shouldRun,
-              "Interrupted but still supposed to run");
-        }
-      }
-    }
-  }
-  
-  /**
-   * Callback interface for state change events.
-   * 
-   * This interface is called from a single thread which also performs
-   * the health monitoring. If the callback processing takes a long time,
-   * no further health checks will be made during this period, nor will
-   * other registered callbacks be called.
-   * 
-   * If the callback itself throws an unchecked exception, no other
-   * callbacks following it will be called, and the health monitor
-   * will terminate, entering HEALTH_MONITOR_FAILED state.
-   */
-  static interface Callback {
-    void enteredState(State newState);
-  }
-}

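Wiring the monitor up amounts to registering a callback before start(). A sketch, assuming the code lives in the org.apache.hadoop.ha package, since the class, its constructor, and start() are all package-private:

    package org.apache.hadoop.ha;

    import org.apache.hadoop.conf.Configuration;

    class MonitorSketch {
      static HealthMonitor watch(Configuration conf, HAServiceTarget target) {
        HealthMonitor monitor = new HealthMonitor(conf, target);
        monitor.addCallback(new HealthMonitor.Callback() {
          @Override
          public void enteredState(HealthMonitor.State newState) {
            // A failover controller would react here, e.g. give up
            // leadership when the state leaves SERVICE_HEALTHY.
            System.out.println("Health state is now " + newState);
          }
        });
        monitor.start(); // spawns the MonitorDaemon loop shown above
        return monitor;
      }
    }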
+ 0 - 194
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/NodeFencer.java

@@ -1,194 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.util.List;
-import java.util.Map;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.util.ReflectionUtils;
-
-import com.google.common.collect.ImmutableMap;
-import com.google.common.collect.Lists;
-
-/**
- * This class parses the configured list of fencing methods, and
- * is responsible for trying each one in turn while logging informative
- * output.<p>
- * 
- * The fencing methods are configured as a newline-separated list.
- * Each line in the list is of the form:<p>
- * <code>com.example.foo.MyMethod(arg string)</code>
- * or
- * <code>com.example.foo.MyMethod</code>
- * The class provided must implement the {@link FenceMethod} interface.
- * The fencing methods that ship with Hadoop may also be referred to
- * by shortened names:<p>
- * <ul>
- * <li><code>shell(/path/to/some/script.sh args...)</code></li>
- * <li><code>sshfence(...)</code> (see {@link SshFenceByTcpPort})</li>
- * </ul>
- */
-@InterfaceAudience.Private
-@InterfaceStability.Evolving
-public class NodeFencer {
-  public static final String CONF_METHODS_KEY =
-    "dfs.ha.fencing.methods";
-  
-  private static final String CLASS_RE = "([a-zA-Z0-9\\.\\$]+)";
-  private static final Pattern CLASS_WITH_ARGUMENT =
-    Pattern.compile(CLASS_RE + "\\((.+?)\\)");
-  private static final Pattern CLASS_WITHOUT_ARGUMENT =
-    Pattern.compile(CLASS_RE);
-  private static final Pattern HASH_COMMENT_RE =
-    Pattern.compile("#.*$");
-
-  private static final Log LOG = LogFactory.getLog(NodeFencer.class);
-
-  /**
-   * Standard fencing methods included with Hadoop.
-   */
-  private static final Map<String, Class<? extends FenceMethod>> STANDARD_METHODS =
-    ImmutableMap.<String, Class<? extends FenceMethod>>of(
-        "shell", ShellCommandFencer.class,
-        "sshfence", SshFenceByTcpPort.class);
-  
-  private final List<FenceMethodWithArg> methods;
-  
-  public NodeFencer(Configuration conf)
-      throws BadFencingConfigurationException {
-    this.methods = parseMethods(conf);
-  }
-  
-  public static NodeFencer create(Configuration conf)
-      throws BadFencingConfigurationException {
-    String confStr = conf.get(CONF_METHODS_KEY);
-    if (confStr == null) {
-      return null;
-    }
-    return new NodeFencer(conf);
-  }
-
-  public boolean fence(HAServiceTarget fromSvc) {
-    LOG.info("====== Beginning Service Fencing Process... ======");
-    int i = 0;
-    for (FenceMethodWithArg method : methods) {
-      LOG.info("Trying method " + (++i) + "/" + methods.size() +": " + method);
-      
-      try {
-        if (method.method.tryFence(fromSvc, method.arg)) {
-          LOG.info("====== Fencing successful by method " + method + " ======");
-          return true;
-        }
-      } catch (BadFencingConfigurationException e) {
-        LOG.error("Fencing method " + method + " misconfigured", e);
-        continue;
-      } catch (Throwable t) {
-        LOG.error("Fencing method " + method + " failed with an unexpected error.", t);
-        continue;
-      }
-      LOG.warn("Fencing method " + method + " was unsuccessful.");
-    }
-    
-    LOG.error("Unable to fence service by any configured method.");
-    return false;
-  }
-
-  private static List<FenceMethodWithArg> parseMethods(Configuration conf)
-      throws BadFencingConfigurationException {
-    String confStr = conf.get(CONF_METHODS_KEY);
-    String[] lines = confStr.split("\\s*\n\\s*");
-    
-    List<FenceMethodWithArg> methods = Lists.newArrayList();
-    for (String line : lines) {
-      line = HASH_COMMENT_RE.matcher(line).replaceAll("");
-      line = line.trim();
-      if (!line.isEmpty()) {
-        methods.add(parseMethod(conf, line));
-      }
-    }
-    
-    return methods;
-  }
-
-  private static FenceMethodWithArg parseMethod(Configuration conf, String line)
-      throws BadFencingConfigurationException {
-    Matcher m;
-    if ((m = CLASS_WITH_ARGUMENT.matcher(line)).matches()) {
-      String className = m.group(1);
-      String arg = m.group(2);
-      return createFenceMethod(conf, className, arg);
-    } else if ((m = CLASS_WITHOUT_ARGUMENT.matcher(line)).matches()) {
-      String className = m.group(1);
-      return createFenceMethod(conf, className, null);
-    } else {
-      throw new BadFencingConfigurationException(
-          "Unable to parse line: '" + line + "'");
-    }
-  }
-
-  private static FenceMethodWithArg createFenceMethod(
-      Configuration conf, String clazzName, String arg)
-      throws BadFencingConfigurationException {
-
-    Class<?> clazz;
-    try {
-      // See if it's a short name for one of the built-in methods
-      clazz = STANDARD_METHODS.get(clazzName);
-      if (clazz == null) {
-        // Try to instantiate the user's custom method
-        clazz = Class.forName(clazzName);
-      }
-    } catch (Exception e) {
-      throw new BadFencingConfigurationException(
-          "Could not find configured fencing method " + clazzName,
-          e);
-    }
-    
-    // Check that it implements the right interface
-    if (!FenceMethod.class.isAssignableFrom(clazz)) {
-      throw new BadFencingConfigurationException("Class " + clazzName +
-          " does not implement FenceMethod");
-    }
-    
-    FenceMethod method = (FenceMethod)ReflectionUtils.newInstance(
-        clazz, conf);
-    method.checkArgs(arg);
-    return new FenceMethodWithArg(method, arg);
-  }
-  
-  private static class FenceMethodWithArg {
-    private final FenceMethod method;
-    private final String arg;
-    
-    private FenceMethodWithArg(FenceMethod method, String arg) {
-      this.method = method;
-      this.arg = arg;
-    }
-    
-    public String toString() {
-      return method.getClass().getCanonicalName() + "(" + arg + ")";
-    }
-  }
-}
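
For reference, a minimal sketch (not part of this commit) of the configuration format the removed NodeFencer parsed; the script path and SSH argument are hypothetical:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.NodeFencer;

public class FencerConfigSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Newline-separated list of methods, tried in order; '#' starts a comment.
    conf.set("dfs.ha.fencing.methods",
        "sshfence(admin:2222)\n" +
        "shell(/path/to/fence.sh)  # fallback if SSH fencing fails");
    NodeFencer fencer = NodeFencer.create(conf);  // returns null if the key is unset
  }
}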

+ 0 - 42
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ServiceFailedException.java

@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-
-/**
- * Exception thrown to indicate that an operation performed
- * to modify the state of a service or application failed.
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class ServiceFailedException extends IOException {
-  private static final long serialVersionUID = 1L;
-
-  public ServiceFailedException(final String message) {
-    super(message);
-  }
-  
-  public ServiceFailedException(String message, Throwable cause) {
-    super(message, cause);
-  }
-}

+ 0 - 188
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ShellCommandFencer.java

@@ -1,188 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.lang.reflect.Field;
-import java.net.InetSocketAddress;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configured;
-import org.apache.hadoop.util.StringUtils;
-
-import com.google.common.annotations.VisibleForTesting;
-
-/**
- * Fencing method that runs a shell command. It should be specified
- * in the fencing configuration like:<br>
- * <code>
- *   shell(/path/to/my/script.sh arg1 arg2 ...)
- * </code><br>
- * The string between '(' and ')' is passed directly to a bash shell and
- * may not include any closing parentheses.<p>
- * 
- * The shell command will be run with an environment set up to contain
- * all of the current Hadoop configuration variables, with the '_' character 
- * replacing any '.' characters in the configuration keys.<p>
- * 
- * If the shell command returns an exit code of 0, the fencing is
- * determined to be successful. If it returns any other exit code, the
- * fencing was not successful and the next fencing method in the list
- * will be attempted.<p>
- * 
- * <em>Note:</em> this fencing method does not implement any timeout.
- * If timeouts are necessary, they should be implemented in the shell
- * script itself (eg by forking a subshell to kill its parent in
- * some number of seconds).
- */
-public class ShellCommandFencer
-  extends Configured implements FenceMethod {
-
-  /** Length at which to abbreviate command in long messages */
-  private static final int ABBREV_LENGTH = 20;
-  
-  @VisibleForTesting
-  static Log LOG = LogFactory.getLog(
-      ShellCommandFencer.class);
-  
-  @Override
-  public void checkArgs(String args) throws BadFencingConfigurationException {
-    if (args == null || args.isEmpty()) {
-      throw new BadFencingConfigurationException(
-          "No argument passed to 'shell' fencing method");
-    }
-    // Nothing else we can really check without actually running the command
-  }
-
-  @Override
-  public boolean tryFence(HAServiceTarget target, String cmd) {
-    InetSocketAddress serviceAddr = target.getAddress();
-    List<String> cmdList = Arrays.asList(cmd.split("\\s+"));
-
-    // Create arg list with service as the first argument
-    List<String> argList = new ArrayList<String>();
-    argList.add(cmdList.get(0));
-    argList.add(serviceAddr.getHostName() + ":" + serviceAddr.getPort());
-    argList.addAll(cmdList.subList(1, cmdList.size()));
-    String cmdWithSvc = StringUtils.join(" ", argList);
-
-    ProcessBuilder builder = new ProcessBuilder(
-        "bash", "-e", "-c", cmdWithSvc);
-    setConfAsEnvVars(builder.environment());
-
-    Process p;
-    try {
-      p = builder.start();
-      p.getOutputStream().close();
-    } catch (IOException e) {
-      LOG.warn("Unable to execute " + cmd, e);
-      return false;
-    }
-    
-    String pid = tryGetPid(p);
-    LOG.info("Launched fencing command '" + cmd + "' with "
-        + ((pid != null) ? ("pid " + pid) : "unknown pid"));
-    
-    String logPrefix = abbreviate(cmd, ABBREV_LENGTH);
-    if (pid != null) {
-      logPrefix = "[PID " + pid + "] " + logPrefix;
-    }
-    
-    // Pump logs to stderr
-    StreamPumper errPumper = new StreamPumper(
-        LOG, logPrefix, p.getErrorStream(),
-        StreamPumper.StreamType.STDERR);
-    errPumper.start();
-    
-    StreamPumper outPumper = new StreamPumper(
-        LOG, logPrefix, p.getInputStream(),
-        StreamPumper.StreamType.STDOUT);
-    outPumper.start();
-    
-    int rc;
-    try {
-      rc = p.waitFor();
-      errPumper.join();
-      outPumper.join();
-    } catch (InterruptedException ie) {
-      LOG.warn("Interrupted while waiting for fencing command: " + cmd);
-      return false;
-    }
-    
-    return rc == 0;
-  }
-
-  /**
-   * Abbreviate a string by putting '...' in the middle of it,
-   * in an attempt to keep logs from getting too messy.
-   * @param cmd the string to abbreviate
-   * @param len maximum length to abbreviate to
-   * @return abbreviated string
-   */
-  static String abbreviate(String cmd, int len) {
-    if (cmd.length() > len && len >= 5) {
-      int firstHalf = (len - 3) / 2;
-      int rem = len - firstHalf - 3;
-      
-      return cmd.substring(0, firstHalf) + 
-        "..." + cmd.substring(cmd.length() - rem);
-    } else {
-      return cmd;
-    }
-  }
-  
-  /**
-   * Attempt to use evil reflection tricks to determine the
-   * pid of a launched process. This is helpful to ops
-   * if debugging a fencing process that might have gone
-   * wrong. If running on a system or JVM where this doesn't
-   * work, it will simply return null.
-   */
-  private static String tryGetPid(Process p) {
-    try {
-      Class<? extends Process> clazz = p.getClass();
-      if (clazz.getName().equals("java.lang.UNIXProcess")) {
-        Field f = clazz.getDeclaredField("pid");
-        f.setAccessible(true);
-        return String.valueOf(f.getInt(p));
-      } else {
-        LOG.trace("Unable to determine pid for " + p
-            + " since it is not a UNIXProcess");
-        return null;
-      }
-    } catch (Throwable t) {
-      LOG.trace("Unable to determine pid for " + p, t);
-      return null;
-    }
-  }
-
-  /**
-   * Set the environment of the subprocess to be the Configuration,
-   * with '.'s replaced by '_'s.
-   */
-  private void setConfAsEnvVars(Map<String, String> env) {
-    for (Map.Entry<String, String> pair : getConf()) {
-      env.put(pair.getKey().replace('.', '_'), pair.getValue());
-    }
-  }
-}
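
As a small illustration (class name hypothetical) of the environment mapping performed by the removed setConfAsEnvVars(), plus the implicit first argument described in the Javadoc above:

public class ShellFencerEnvSketch {
  public static void main(String[] args) {
    // Every Configuration entry was exported to the subprocess environment
    // with '.' replaced by '_'.
    String key = "dfs.ha.fencing.methods";
    System.out.println(key.replace('.', '_'));  // prints dfs_ha_fencing_methods
    // The target's host:port was also prepended as the script's first argument.
  }
}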

+ 0 - 314
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/SshFenceByTcpPort.java

@@ -1,314 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.net.InetSocketAddress;
-import java.util.Collection;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.conf.Configured;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.jcraft.jsch.ChannelExec;
-import com.jcraft.jsch.JSch;
-import com.jcraft.jsch.JSchException;
-import com.jcraft.jsch.Session;
-
-/**
- * This fencing implementation sshes to the target node and uses 
- * <code>fuser</code> to kill the process listening on the service's
- * TCP port. This is more accurate than using "jps" since it doesn't 
- * require parsing, and will work even if there are multiple service
- * processes running on the same machine.<p>
- * It returns a successful status code if:
- * <ul>
- * <li><code>fuser</code> indicates it successfully killed a process, <em>or</em>
- * <li><code>nc -z</code> indicates that nothing is listening on the target port
- * </ul>
- * <p>
- * This fencing mechanism is configured as follows in the fencing method
- * list:
- * <code>sshfence([[username][:ssh-port]])</code>
- * where the optional argument specifies the username and port to use
- * with ssh.
- * <p>
- * In order to achieve passwordless SSH, the operator must also configure
- * <code>dfs.ha.fencing.ssh.private-key-files</code> to point to an
- * SSH key that has passphrase-less access to the given username and host.
- */
-public class SshFenceByTcpPort extends Configured
-  implements FenceMethod {
-
-  static final Log LOG = LogFactory.getLog(
-      SshFenceByTcpPort.class);
-  
-  static final String CONF_CONNECT_TIMEOUT_KEY =
-    "dfs.ha.fencing.ssh.connect-timeout";
-  private static final int CONF_CONNECT_TIMEOUT_DEFAULT =
-    30*1000;
-  static final String CONF_IDENTITIES_KEY =
-    "dfs.ha.fencing.ssh.private-key-files";
-
-  /**
-   * Verify that the argument, if given, in the conf is parseable.
-   */
-  @Override
-  public void checkArgs(String argStr) throws BadFencingConfigurationException {
-    if (argStr != null) {
-      new Args(argStr);
-    }
-  }
-
-  @Override
-  public boolean tryFence(HAServiceTarget target, String argsStr)
-      throws BadFencingConfigurationException {
-
-    Args args = new Args(argsStr);
-    InetSocketAddress serviceAddr = target.getAddress();
-    String host = serviceAddr.getHostName();
-    
-    Session session;
-    try {
-      session = createSession(serviceAddr.getHostName(), args);
-    } catch (JSchException e) {
-      LOG.warn("Unable to create SSH session", e);
-      return false;
-    }
-
-    LOG.info("Connecting to " + host + "...");
-    
-    try {
-      session.connect(getSshConnectTimeout());
-    } catch (JSchException e) {
-      LOG.warn("Unable to connect to " + host
-          + " as user " + args.user, e);
-      return false;
-    }
-    LOG.info("Connected to " + host);
-
-    try {
-      return doFence(session, serviceAddr);
-    } catch (JSchException e) {
-      LOG.warn("Unable to achieve fencing on remote host", e);
-      return false;
-    } finally {
-      session.disconnect();
-    }
-  }
-
-
-  private Session createSession(String host, Args args) throws JSchException {
-    JSch jsch = new JSch();
-    for (String keyFile : getKeyFiles()) {
-      jsch.addIdentity(keyFile);
-    }
-    JSch.setLogger(new LogAdapter());
-
-    Session session = jsch.getSession(args.user, host, args.sshPort);
-    session.setConfig("StrictHostKeyChecking", "no");
-    return session;
-  }
-
-  private boolean doFence(Session session, InetSocketAddress serviceAddr)
-      throws JSchException {
-    int port = serviceAddr.getPort();
-    try {
-      LOG.info("Looking for process running on port " + port);
-      int rc = execCommand(session,
-          "PATH=$PATH:/sbin:/usr/sbin fuser -v -k -n tcp " + port);
-      if (rc == 0) {
-        LOG.info("Successfully killed process that was " +
-            "listening on port " + port);
-        // exit code 0 indicates the process was successfully killed.
-        return true;
-      } else if (rc == 1) {
-        // exit code 1 indicates either that the process was not running
-        // or that fuser didn't have root privileges in order to find it
-        // (eg running as a different user)
-        LOG.info(
-            "Indeterminate response from trying to kill service. " +
-            "Verifying whether it is running using nc...");
-        rc = execCommand(session, "nc -z " + serviceAddr.getHostName() +
-            " " + serviceAddr.getPort());
-        if (rc == 0) {
-          // the service is still listening - we are unable to fence
-          LOG.warn("Unable to fence - it is running but we cannot kill it");
-          return false;
-        } else {
-          LOG.info("Verified that the service is down.");
-          return true;          
-        }
-      } else {
-        // other 
-      }
-      LOG.info("rc: " + rc);
-      return rc == 0;
-    } catch (InterruptedException e) {
-      LOG.warn("Interrupted while trying to fence via ssh", e);
-      return false;
-    } catch (IOException e) {
-      LOG.warn("Unknown failure while trying to fence via ssh", e);
-      return false;
-    }
-  }
-  
-  /**
-   * Execute a command through the ssh session, pumping its
-   * stderr and stdout to our own logs.
-   */
-  private int execCommand(Session session, String cmd)
-      throws JSchException, InterruptedException, IOException {
-    LOG.debug("Running cmd: " + cmd);
-    ChannelExec exec = null;
-    try {
-      exec = (ChannelExec)session.openChannel("exec");
-      exec.setCommand(cmd);
-      exec.setInputStream(null);
-      exec.connect();
-
-      // Pump stdout of the command to our WARN logs
-      StreamPumper outPumper = new StreamPumper(LOG, cmd + " via ssh",
-          exec.getInputStream(), StreamPumper.StreamType.STDOUT);
-      outPumper.start();
-      
-      // Pump stderr of the command to our WARN logs
-      StreamPumper errPumper = new StreamPumper(LOG, cmd + " via ssh",
-          exec.getErrStream(), StreamPumper.StreamType.STDERR);
-      errPumper.start();
-      
-      outPumper.join();
-      errPumper.join();
-      return exec.getExitStatus();
-    } finally {
-      cleanup(exec);
-    }
-  }
-
-  private static void cleanup(ChannelExec exec) {
-    if (exec != null) {
-      try {
-        exec.disconnect();
-      } catch (Throwable t) {
-        LOG.warn("Couldn't disconnect ssh channel", t);
-      }
-    }
-  }
-
-  private int getSshConnectTimeout() {
-    return getConf().getInt(
-        CONF_CONNECT_TIMEOUT_KEY, CONF_CONNECT_TIMEOUT_DEFAULT);
-  }
-
-  private Collection<String> getKeyFiles() {
-    return getConf().getTrimmedStringCollection(CONF_IDENTITIES_KEY);
-  }
-  
-  /**
-   * Container for the parsed arg line for this fencing method.
-   */
-  @VisibleForTesting
-  static class Args {
-    private static final Pattern USER_PORT_RE = Pattern.compile(
-      "([^:]+?)?(?:\\:(\\d+))?");
-
-    private static final int DEFAULT_SSH_PORT = 22;
-
-    String user;
-    int sshPort;
-    
-    public Args(String arg) 
-        throws BadFencingConfigurationException {
-      user = System.getProperty("user.name");
-      sshPort = DEFAULT_SSH_PORT;
-
-      // Parse optional user and ssh port
-      if (arg != null && !"".equals(arg)) {
-        Matcher m = USER_PORT_RE.matcher(arg);
-        if (!m.matches()) {
-          throw new BadFencingConfigurationException(
-              "Unable to parse user and SSH port: "+ arg);
-        }
-        if (m.group(1) != null) {
-          user = m.group(1);
-        }
-        if (m.group(2) != null) {
-          sshPort = parseConfiggedPort(m.group(2));
-        }
-      }
-    }
-
-    private Integer parseConfiggedPort(String portStr)
-        throws BadFencingConfigurationException {
-      try {
-        return Integer.valueOf(portStr);
-      } catch (NumberFormatException nfe) {
-        throw new BadFencingConfigurationException(
-            "Port number '" + portStr + "' invalid");
-      }
-    }
-  }
-
-  /**
-   * Adapter from JSch's logger interface to our log4j
-   */
-  private static class LogAdapter implements com.jcraft.jsch.Logger {
-    static final Log LOG = LogFactory.getLog(
-        SshFenceByTcpPort.class.getName() + ".jsch");
-
-    public boolean isEnabled(int level) {
-      switch (level) {
-      case com.jcraft.jsch.Logger.DEBUG:
-        return LOG.isDebugEnabled();
-      case com.jcraft.jsch.Logger.INFO:
-        return LOG.isInfoEnabled();
-      case com.jcraft.jsch.Logger.WARN:
-        return LOG.isWarnEnabled();
-      case com.jcraft.jsch.Logger.ERROR:
-        return LOG.isErrorEnabled();
-      case com.jcraft.jsch.Logger.FATAL:
-        return LOG.isFatalEnabled();
-      default:
-        return false;
-      }
-    }
-      
-    public void log(int level, String message) {
-      switch (level) {
-      case com.jcraft.jsch.Logger.DEBUG:
-        LOG.debug(message);
-        break;
-      case com.jcraft.jsch.Logger.INFO:
-        LOG.info(message);
-        break;
-      case com.jcraft.jsch.Logger.WARN:
-        LOG.warn(message);
-        break;
-      case com.jcraft.jsch.Logger.ERROR:
-        LOG.error(message);
-        break;
-      case com.jcraft.jsch.Logger.FATAL:
-        LOG.fatal(message);
-        break;
-      }
-    }
-  }
-}
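
A minimal sketch (not part of this commit) of the sshfence argument grammar parsed by the removed Args class; the username and port are hypothetical:

package org.apache.hadoop.ha;  // Args is package-private

public class SshFenceArgsSketch {
  public static void main(String[] args) throws Exception {
    // Both parts of "username:port" are optional.
    SshFenceByTcpPort.Args a = new SshFenceByTcpPort.Args("hauser:2222");
    System.out.println(a.user + ":" + a.sshPort);  // prints hauser:2222
    // "hauser" alone keeps the default port 22; ":2222" alone keeps
    // the current user (System.getProperty("user.name")).
  }
}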

+ 0 - 90
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/StreamPumper.java

@@ -1,90 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.BufferedReader;
-import java.io.IOException;
-import java.io.InputStream;
-import java.io.InputStreamReader;
-
-import org.apache.commons.logging.Log;
-
-/**
- * Class responsible for pumping the streams of the subprocess
- * out to log4j. stderr is pumped to WARN level and stdout is
- * pumped to INFO level
- */
-class StreamPumper {
-  enum StreamType {
-    STDOUT, STDERR;
-  }
-
-  private final Log log;
-  
-  final Thread thread;
-  final String logPrefix;
-  final StreamPumper.StreamType type;
-  private final InputStream stream;
-  private boolean started = false;
-  
-  StreamPumper(final Log log, final String logPrefix,
-      final InputStream stream, final StreamType type) {
-    this.log = log;
-    this.logPrefix = logPrefix;
-    this.stream = stream;
-    this.type = type;
-    
-    thread = new Thread(new Runnable() {
-      @Override
-      public void run() {
-        try {
-          pump();
-        } catch (Throwable t) {
-          ShellCommandFencer.LOG.warn(logPrefix +
-              ": Unable to pump output from " + type,
-              t);
-        }
-      }
-    }, logPrefix + ": StreamPumper for " + type);
-    thread.setDaemon(true);
-  }
-  
-  void join() throws InterruptedException {
-    assert started;
-    thread.join();
-  }
-
-  void start() {
-    assert !started;
-    thread.start();
-    started = true;
-  }
-
-  protected void pump() throws IOException {
-    InputStreamReader inputStreamReader = new InputStreamReader(stream);
-    BufferedReader br = new BufferedReader(inputStreamReader);
-    String line = null;
-    while ((line = br.readLine()) != null) {
-      if (type == StreamType.STDOUT) {
-        log.info(logPrefix + ": " + line);
-      } else {
-        log.warn(logPrefix + ": " + line);          
-      }
-    }
-  }
-}
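
A usage sketch (not part of this commit; the echoed command is arbitrary) showing the start/join protocol the removed StreamPumper asserts:

package org.apache.hadoop.ha;  // StreamPumper is package-private

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;

public class PumperSketch {
  public static void main(String[] args) throws Exception {
    Log log = LogFactory.getLog(PumperSketch.class);
    Process p = new ProcessBuilder(
        "bash", "-c", "echo out; echo err >&2").start();
    // One pumper per stream: stdout lines go to log.info, stderr to log.warn.
    StreamPumper out = new StreamPumper(log, "demo", p.getInputStream(),
        StreamPumper.StreamType.STDOUT);
    StreamPumper err = new StreamPumper(log, "demo", p.getErrorStream(),
        StreamPumper.StreamType.STDERR);
    out.start();  // start() must precede join(), as asserted above
    err.start();
    p.waitFor();
    out.join();
    err.join();
  }
}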

+ 0 - 387
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/ZKFailoverController.java

@@ -1,387 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha;
-
-import java.io.IOException;
-import java.security.PrivilegedAction;
-import java.util.List;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.HadoopIllegalArgumentException;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.ActiveStandbyElector.ActiveStandbyElectorCallback;
-import org.apache.hadoop.ha.HealthMonitor.State;
-import org.apache.hadoop.security.SecurityUtil;
-import org.apache.hadoop.util.Tool;
-import org.apache.hadoop.util.ToolRunner;
-import org.apache.zookeeper.ZooDefs.Ids;
-import org.apache.zookeeper.data.ACL;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-
-@InterfaceAudience.LimitedPrivate("HDFS")
-public abstract class ZKFailoverController implements Tool {
-
-  static final Log LOG = LogFactory.getLog(ZKFailoverController.class);
-  
-  // TODO: this should be namespace-scoped
-  public static final String ZK_QUORUM_KEY = "ha.zookeeper.quorum";
-  private static final String ZK_SESSION_TIMEOUT_KEY = "ha.zookeeper.session-timeout.ms";
-  private static final int ZK_SESSION_TIMEOUT_DEFAULT = 5*1000;
-  private static final String ZK_PARENT_ZNODE_KEY = "ha.zookeeper.parent-znode";
-  static final String ZK_PARENT_ZNODE_DEFAULT = "/hadoop-ha";
-
-  /** Unable to format the parent znode in ZK */
-  static final int ERR_CODE_FORMAT_DENIED = 2;
-  /** The parent znode doesn't exist in ZK */
-  static final int ERR_CODE_NO_PARENT_ZNODE = 3;
-  /** Fencing is not properly configured */
-  static final int ERR_CODE_NO_FENCER = 4;
-  
-  private Configuration conf;
-
-  private HealthMonitor healthMonitor;
-  private ActiveStandbyElector elector;
-
-  private HAServiceTarget localTarget;
-
-  private String parentZnode;
-
-  private State lastHealthState = State.INITIALIZING;
-
-  /** Set if a fatal error occurs */
-  private String fatalError = null;
-
-  @Override
-  public void setConf(Configuration conf) {
-    this.conf = conf;
-    localTarget = getLocalTarget();
-  }
-  
-
-  protected abstract byte[] targetToData(HAServiceTarget target);
-  protected abstract HAServiceTarget getLocalTarget();  
-  protected abstract HAServiceTarget dataToTarget(byte[] data);
-
-
-  @Override
-  public Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public int run(final String[] args) throws Exception {
-    // TODO: need to hook DFS here to find the NN keytab info, etc,
-    // similar to what DFSHAAdmin does. Annoying that this is in common.
-    try {
-      return SecurityUtil.doAsLoginUserOrFatal(new PrivilegedAction<Integer>() {
-        @Override
-        public Integer run() {
-          try {
-            return doRun(args);
-          } catch (Exception t) {
-            throw new RuntimeException(t);
-          }
-        }
-      });
-    } catch (RuntimeException rte) {
-      throw (Exception)rte.getCause();
-    }
-  }
-  
-  private int doRun(String[] args)
-      throws HadoopIllegalArgumentException, IOException, InterruptedException {
-    initZK();
-    if (args.length > 0) {
-      if ("-formatZK".equals(args[0])) {
-        boolean force = false;
-        boolean interactive = true;
-        for (int i = 1; i < args.length; i++) {
-          if ("-force".equals(args[i])) {
-            force = true;
-          } else if ("-nonInteractive".equals(args[i])) {
-            interactive = false;
-          } else {
-            badArg(args[i]);
-          }
-        }
-        return formatZK(force, interactive);
-      } else {
-        badArg(args[0]);
-      }
-    }
-    
-    if (!elector.parentZNodeExists()) {
-      LOG.fatal("Unable to start failover controller. " +
-          "Parent znode does not exist.\n" +
-          "Run with -formatZK flag to initialize ZooKeeper.");
-      return ERR_CODE_NO_PARENT_ZNODE;
-    }
-
-    try {
-      localTarget.checkFencingConfigured();
-    } catch (BadFencingConfigurationException e) {
-      LOG.fatal("Fencing is not configured for " + localTarget + ".\n" +
-          "You must configure a fencing method before using automatic " +
-          "failover.", e);
-      return ERR_CODE_NO_FENCER;
-    }
-
-    initHM();
-    mainLoop();
-    return 0;
-  }
-
-  private void badArg(String arg) {
-    printUsage();
-    throw new HadoopIllegalArgumentException(
-        "Bad argument: " + arg);
-  }
-
-  private void printUsage() {
-    System.err.println("Usage: " + this.getClass().getSimpleName() +
-        " [-formatZK [-force | -nonInteractive]]");
-  }
-
-  private int formatZK(boolean force, boolean interactive)
-      throws IOException, InterruptedException {
-    if (elector.parentZNodeExists()) {
-      if (!force && (!interactive || !confirmFormat())) {
-        return ERR_CODE_FORMAT_DENIED;
-      }
-      
-      try {
-        elector.clearParentZNode();
-      } catch (IOException e) {
-        LOG.error("Unable to clear zk parent znode", e);
-        return 1;
-      }
-    }
-    
-    elector.ensureParentZNode();
-    return 0;
-  }
-
-  private boolean confirmFormat() {
-    System.err.println(
-        "===============================================\n" +
-        "The configured parent znode " + parentZnode + " already exists.\n" +
-        "Are you sure you want to clear all failover information from\n" +
-        "ZooKeeper?\n" +
-        "WARNING: Before proceeding, ensure that all HDFS services and\n" +
-        "failover controllers are stopped!\n" +
-        "===============================================");
-    try {
-      return ToolRunner.confirmPrompt("Proceed formatting " + parentZnode + "?");
-    } catch (IOException e) {
-      LOG.debug("Failed to confirm", e);
-      return false;
-    }
-  }
-
-  // ------------------------------------------
-  // Begin actual guts of failover controller
-  // ------------------------------------------
-  
-  private void initHM() {
-    healthMonitor = new HealthMonitor(conf, localTarget);
-    healthMonitor.addCallback(new HealthCallbacks());
-    healthMonitor.start();
-  }
-
-  private void initZK() throws HadoopIllegalArgumentException, IOException {
-    String zkQuorum = conf.get(ZK_QUORUM_KEY);
-    int zkTimeout = conf.getInt(ZK_SESSION_TIMEOUT_KEY,
-        ZK_SESSION_TIMEOUT_DEFAULT);
-    parentZnode = conf.get(ZK_PARENT_ZNODE_KEY,
-        ZK_PARENT_ZNODE_DEFAULT);
-    // TODO: need ZK ACL support in config, also maybe auth!
-    List<ACL> zkAcls = Ids.OPEN_ACL_UNSAFE;
-
-    Preconditions.checkArgument(zkQuorum != null,
-        "Missing required configuration '%s' for ZooKeeper quorum",
-        ZK_QUORUM_KEY);
-    Preconditions.checkArgument(zkTimeout > 0,
-        "Invalid ZK session timeout %s", zkTimeout);
-    
-
-    elector = new ActiveStandbyElector(zkQuorum,
-        zkTimeout, parentZnode, zkAcls, new ElectorCallbacks());
-  }
-  
-  private synchronized void mainLoop() throws InterruptedException {
-    while (fatalError == null) {
-      wait();
-    }
-    assert fatalError != null; // only get here on fatal
-    throw new RuntimeException(
-        "ZK Failover Controller failed: " + fatalError);
-  }
-  
-  private synchronized void fatalError(String err) {
-    LOG.fatal("Fatal error occurred:" + err);
-    fatalError = err;
-    notifyAll();
-  }
-  
-  private synchronized void becomeActive() {
-    LOG.info("Trying to make " + localTarget + " active...");
-    try {
-      localTarget.getProxy().transitionToActive();
-      LOG.info("Successfully transitioned " + localTarget +
-          " to active state");
-    } catch (Throwable t) {
-      LOG.fatal("Couldn't make " + localTarget + " active", t);
-      elector.quitElection(true);
-/*
-* TODO:
-* we need to make sure that if we get fenced and then quickly restarted,
-* none of these calls will retry across the restart boundary
-* perhaps the solution is that, whenever the nn starts, it gets a unique
-* ID, and when we start becoming active, we record it, and then any future
-* calls use the same ID
-*/
-      
-    }
-  }
-
-  private synchronized void becomeStandby() {
-    LOG.info("ZK Election indicated that " + localTarget +
-        " should become standby");
-    try {
-      localTarget.getProxy().transitionToStandby();
-      LOG.info("Successfully transitioned " + localTarget +
-          " to standby state");
-    } catch (Exception e) {
-      LOG.error("Couldn't transition " + localTarget + " to standby state",
-          e);
-      // TODO handle this. It's a likely case since we probably got fenced
-      // at the same time.
-    }
-  }
-
-  /**
-   * @return the last health state passed to the FC
-   * by the HealthMonitor.
-   */
-  @VisibleForTesting
-  State getLastHealthState() {
-    return lastHealthState;
-  }
-  
-  @VisibleForTesting
-  ActiveStandbyElector getElectorForTests() {
-    return elector;
-  }
-
-  /**
-   * Callbacks from elector
-   */
-  class ElectorCallbacks implements ActiveStandbyElectorCallback {
-    @Override
-    public void becomeActive() {
-      ZKFailoverController.this.becomeActive();
-    }
-
-    @Override
-    public void becomeStandby() {
-      ZKFailoverController.this.becomeStandby();
-    }
-
-    @Override
-    public void enterNeutralMode() {
-    }
-
-    @Override
-    public void notifyFatalError(String errorMessage) {
-      fatalError(errorMessage);
-    }
-
-    @Override
-    public void fenceOldActive(byte[] data) {
-      HAServiceTarget target = dataToTarget(data);
-      
-      LOG.info("Should fence: " + target);
-      boolean gracefulWorked =
-        FailoverController.tryGracefulFence(conf, target);
-      if (gracefulWorked) {
-        // It's possible that it's in standby but just about to go into active,
-        // no? Is there some race here?
-        LOG.info("Successfully transitioned " + target + " to standby " +
-            "state without fencing");
-        return;
-      }
-      
-      try {
-        target.checkFencingConfigured();
-      } catch (BadFencingConfigurationException e) {
-        LOG.error("Couldn't fence old active " + target, e);
-        // TODO: see below todo
-        throw new RuntimeException(e);
-      }
-      
-      if (!target.getFencer().fence(target)) {
-        // TODO: this will end up in some kind of tight loop,
-        // won't it? We need some kind of backoff
-        throw new RuntimeException("Unable to fence " + target);
-      }
-    }
-  }
-  
-  /**
-   * Callbacks from HealthMonitor
-   */
-  class HealthCallbacks implements HealthMonitor.Callback {
-    @Override
-    public void enteredState(HealthMonitor.State newState) {
-      LOG.info("Local service " + localTarget +
-          " entered state: " + newState);
-      switch (newState) {
-      case SERVICE_HEALTHY:
-        LOG.info("Joining master election for " + localTarget);
-        elector.joinElection(targetToData(localTarget));
-        break;
-        
-      case INITIALIZING:
-        LOG.info("Ensuring that " + localTarget + " does not " +
-            "participate in active master election");
-        elector.quitElection(false);
-        break;
-
-      case SERVICE_UNHEALTHY:
-      case SERVICE_NOT_RESPONDING:
-        LOG.info("Quitting master election for " + localTarget +
-            " and marking that fencing is necessary");
-        elector.quitElection(true);
-        break;
-        
-      case HEALTH_MONITOR_FAILED:
-        fatalError("Health monitor failed!");
-        break;
-        
-      default:
-        throw new IllegalArgumentException("Unhandled state:" + newState);
-      }
-      
-      lastHealthState = newState;
-    }
-  }
-}
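
For reference, a sketch (not part of this commit; quorum hosts hypothetical) of the ZooKeeper keys read by the removed initZK(), used alongside the CLI usage printed by printUsage() — "[-formatZK [-force | -nonInteractive]]":

import org.apache.hadoop.conf.Configuration;

public class ZkfcConfigSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Required: initZK() fails fast if the quorum string is missing.
    conf.set("ha.zookeeper.quorum", "zk1:2181,zk2:2181,zk3:2181");
    // Optional keys, shown here with the defaults from the removed class:
    conf.setInt("ha.zookeeper.session-timeout.ms", 5000);
    conf.set("ha.zookeeper.parent-znode", "/hadoop-ha");
  }
}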

+ 0 - 155
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolClientSideTranslatorPB.java

@@ -1,155 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha.protocolPB;
-
-import java.io.Closeable;
-import java.io.IOException;
-import java.net.InetSocketAddress;
-
-import javax.net.SocketFactory;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.HAServiceStatus;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
-import org.apache.hadoop.ipc.ProtobufHelper;
-import org.apache.hadoop.ipc.ProtobufRpcEngine;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.ProtocolTranslator;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.security.UserGroupInformation;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * This class is the client side translator to translate the requests made on
- * {@link HAServiceProtocol} interfaces to the RPC server implementing
- * {@link HAServiceProtocolPB}.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public class HAServiceProtocolClientSideTranslatorPB implements
-    HAServiceProtocol, Closeable, ProtocolTranslator {
-  /** RpcController is not used and hence is set to null */
-  private final static RpcController NULL_CONTROLLER = null;
-  private final static MonitorHealthRequestProto MONITOR_HEALTH_REQ = 
-      MonitorHealthRequestProto.newBuilder().build();
-  private final static TransitionToActiveRequestProto TRANSITION_TO_ACTIVE_REQ = 
-      TransitionToActiveRequestProto.newBuilder().build();
-  private final static TransitionToStandbyRequestProto TRANSITION_TO_STANDBY_REQ = 
-      TransitionToStandbyRequestProto.newBuilder().build();
-  private final static GetServiceStatusRequestProto GET_SERVICE_STATUS_REQ = 
-      GetServiceStatusRequestProto.newBuilder().build();
-  
-  private final HAServiceProtocolPB rpcProxy;
-
-  public HAServiceProtocolClientSideTranslatorPB(InetSocketAddress addr,
-      Configuration conf) throws IOException {
-    RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
-        ProtobufRpcEngine.class);
-    rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
-        RPC.getProtocolVersion(HAServiceProtocolPB.class), addr, conf);
-  }
-  
-  public HAServiceProtocolClientSideTranslatorPB(
-      InetSocketAddress addr, Configuration conf,
-      SocketFactory socketFactory, int timeout) throws IOException {
-    RPC.setProtocolEngine(conf, HAServiceProtocolPB.class,
-        ProtobufRpcEngine.class);
-    rpcProxy = RPC.getProxy(HAServiceProtocolPB.class,
-        RPC.getProtocolVersion(HAServiceProtocolPB.class), addr,
-        UserGroupInformation.getCurrentUser(), conf, socketFactory, timeout);
-  }
-
-  @Override
-  public void monitorHealth() throws IOException {
-    try {
-      rpcProxy.monitorHealth(NULL_CONTROLLER, MONITOR_HEALTH_REQ);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public void transitionToActive() throws IOException {
-    try {
-      rpcProxy.transitionToActive(NULL_CONTROLLER, TRANSITION_TO_ACTIVE_REQ);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public void transitionToStandby() throws IOException {
-    try {
-      rpcProxy.transitionToStandby(NULL_CONTROLLER, TRANSITION_TO_STANDBY_REQ);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-  }
-
-  @Override
-  public HAServiceStatus getServiceStatus() throws IOException {
-    GetServiceStatusResponseProto status;
-    try {
-      status = rpcProxy.getServiceStatus(NULL_CONTROLLER,
-          GET_SERVICE_STATUS_REQ);
-    } catch (ServiceException e) {
-      throw ProtobufHelper.getRemoteException(e);
-    }
-    
-    HAServiceStatus ret = new HAServiceStatus(
-        convert(status.getState()));
-    if (status.getReadyToBecomeActive()) {
-      ret.setReadyToBecomeActive();
-    } else {
-      ret.setNotReadyToBecomeActive(status.getNotReadyReason());
-    }
-    return ret;
-  }
-  
-  private HAServiceState convert(HAServiceStateProto state) {
-    switch(state) {
-    case ACTIVE:
-      return HAServiceState.ACTIVE;
-    case STANDBY:
-      return HAServiceState.STANDBY;
-    case INITIALIZING:
-    default:
-      return HAServiceState.INITIALIZING;
-    }
-  }
-  
-  @Override
-  public void close() {
-    RPC.stopProxy(rpcProxy);
-  }
-
-  @Override
-  public Object getUnderlyingProxyObject() {
-    return rpcProxy;
-  }
-}
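
A minimal usage sketch (not part of this commit; the NameNode address is hypothetical) of the removed client-side translator, which hides all protobuf details from callers:

import java.net.InetSocketAddress;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ha.HAServiceStatus;
import org.apache.hadoop.ha.protocolPB.HAServiceProtocolClientSideTranslatorPB;

public class HaStatusProbeSketch {
  public static void main(String[] args) throws Exception {
    InetSocketAddress addr = new InetSocketAddress("nn1.example.com", 8020);
    HAServiceProtocolClientSideTranslatorPB proxy =
        new HAServiceProtocolClientSideTranslatorPB(addr, new Configuration());
    try {
      HAServiceStatus status = proxy.getServiceStatus();
      System.out.println("HA state: " + status.getState());
    } finally {
      proxy.close();  // stops the underlying RPC proxy
    }
  }
}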

+ 0 - 39
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolPB.java

@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha.protocolPB;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.fs.CommonConfigurationKeys;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceProtocolService;
-import org.apache.hadoop.ipc.ProtocolInfo;
-import org.apache.hadoop.ipc.VersionedProtocol;
-import org.apache.hadoop.security.KerberosInfo;
-
-@KerberosInfo(
-    serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
-@ProtocolInfo(protocolName = "org.apache.hadoop.ha.HAServiceProtocol", 
-    protocolVersion = 1)
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public interface HAServiceProtocolPB extends
-    HAServiceProtocolService.BlockingInterface, VersionedProtocol {
-  /**
-   * If any methods need annotation, it can be added here
-   */
-}

+ 0 - 152
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/protocolPB/HAServiceProtocolServerSideTranslatorPB.java

@@ -1,152 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ha.protocolPB;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ha.HAServiceProtocol;
-import org.apache.hadoop.ha.HAServiceStatus;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.GetServiceStatusResponseProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.HAServiceStateProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.MonitorHealthResponseProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToActiveResponseProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyRequestProto;
-import org.apache.hadoop.ha.proto.HAServiceProtocolProtos.TransitionToStandbyResponseProto;
-import org.apache.hadoop.ipc.ProtocolSignature;
-import org.apache.hadoop.ipc.RPC;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * This class is used on the server side. Calls come across the wire for
- * the protocol {@link HAServiceProtocolPB}.
- * This class translates the PB data types
- * to the native data types used inside the NN as specified in the generic
- * ClientProtocol.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public class HAServiceProtocolServerSideTranslatorPB implements
-    HAServiceProtocolPB {
-  private final HAServiceProtocol server;
-  private static final MonitorHealthResponseProto MONITOR_HEALTH_RESP = 
-      MonitorHealthResponseProto.newBuilder().build();
-  private static final TransitionToActiveResponseProto TRANSITION_TO_ACTIVE_RESP = 
-      TransitionToActiveResponseProto.newBuilder().build();
-  private static final TransitionToStandbyResponseProto TRANSITION_TO_STANDBY_RESP = 
-      TransitionToStandbyResponseProto.newBuilder().build();
-  
-  public HAServiceProtocolServerSideTranslatorPB(HAServiceProtocol server) {
-    this.server = server;
-  }
-
-  @Override
-  public MonitorHealthResponseProto monitorHealth(RpcController controller,
-      MonitorHealthRequestProto request) throws ServiceException {
-    try {
-      server.monitorHealth();
-      return MONITOR_HEALTH_RESP;
-    } catch(IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public TransitionToActiveResponseProto transitionToActive(
-      RpcController controller, TransitionToActiveRequestProto request)
-      throws ServiceException {
-    try {
-      server.transitionToActive();
-      return TRANSITION_TO_ACTIVE_RESP;
-    } catch(IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public TransitionToStandbyResponseProto transitionToStandby(
-      RpcController controller, TransitionToStandbyRequestProto request)
-      throws ServiceException {
-    try {
-      server.transitionToStandby();
-      return TRANSITION_TO_STANDBY_RESP;
-    } catch(IOException e) {
-      throw new ServiceException(e);
-    }
-  }
-
-  @Override
-  public GetServiceStatusResponseProto getServiceStatus(RpcController controller,
-      GetServiceStatusRequestProto request) throws ServiceException {
-    HAServiceStatus s;
-    try {
-      s = server.getServiceStatus();
-    } catch(IOException e) {
-      throw new ServiceException(e);
-    }
-    
-    HAServiceStateProto retState;
-    switch (s.getState()) {
-    case ACTIVE:
-      retState = HAServiceStateProto.ACTIVE;
-      break;
-    case STANDBY:
-      retState = HAServiceStateProto.STANDBY;
-      break;
-    case INITIALIZING:
-    default:
-      retState = HAServiceStateProto.INITIALIZING;
-      break;
-    }
-    
-    GetServiceStatusResponseProto.Builder ret =
-      GetServiceStatusResponseProto.newBuilder()
-        .setState(retState)
-        .setReadyToBecomeActive(s.isReadyToBecomeActive());
-    if (!s.isReadyToBecomeActive()) {
-      ret.setNotReadyReason(s.getNotReadyReason());
-    }
-    return ret.build();
-  }
-
-  @Override
-  public long getProtocolVersion(String protocol, long clientVersion)
-      throws IOException {
-    return RPC.getProtocolVersion(HAServiceProtocolPB.class);
-  }
-
-  @Override
-  public ProtocolSignature getProtocolSignature(String protocol,
-      long clientVersion, int clientMethodsHash) throws IOException {
-    if (!protocol.equals(RPC.getProtocolName(HAServiceProtocolPB.class))) {
-      throw new IOException("Serverside implements " +
-          RPC.getProtocolName(HAServiceProtocolPB.class) +
-          ". The following requested protocol is unknown: " + protocol);
-    }
-
-    return ProtocolSignature.getProtocolSignature(clientMethodsHash,
-        RPC.getProtocolVersion(HAServiceProtocolPB.class),
-        HAServiceProtocolPB.class);
-  }
-}

+ 0 - 4
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer.java

@@ -99,8 +99,6 @@ public class HttpServer implements FilterContainer {
   public static final String CONF_CONTEXT_ATTRIBUTE = "hadoop.conf";
   static final String ADMINS_ACL = "admins.acl";
 
-  public static final String BIND_ADDRESS = "bind.address";
-
   private AccessControlList adminsAcl;
 
   protected final Server webServer;
@@ -244,8 +242,6 @@ public class HttpServer implements FilterContainer {
     addGlobalFilter("safety", QuotingInputFilter.class.getName(), null);
     final FilterInitializer[] initializers = getFilterInitializers(conf); 
     if (initializers != null) {
-      conf = new Configuration(conf);
-      conf.set(BIND_ADDRESS, bindAddress);
       for(FilterInitializer c : initializers) {
         c.initFilter(this, conf);
       }

+ 0 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/Text.java

@@ -239,7 +239,6 @@ public class Text extends BinaryComparable
    */
   public void clear() {
     length = 0;
-    bytes = EMPTY_BYTES;
   }
 
   /*

+ 7 - 15
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/DefaultFailoverProxyProvider.java

@@ -17,44 +17,36 @@
  */
 package org.apache.hadoop.io.retry;
 
-import java.io.IOException;
-
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.RPC;
 
 /**
  * An implementation of {@link FailoverProxyProvider} which does nothing in the
  * event of failover, and always returns the same proxy object. 
  */
 @InterfaceStability.Evolving
-public class DefaultFailoverProxyProvider<T> implements FailoverProxyProvider<T> {
+public class DefaultFailoverProxyProvider implements FailoverProxyProvider {
   
-  private T proxy;
-  private Class<T> iface;
+  private Object proxy;
+  private Class<?> iface;
   
-  public DefaultFailoverProxyProvider(Class<T> iface, T proxy) {
+  public DefaultFailoverProxyProvider(Class<?> iface, Object proxy) {
     this.proxy = proxy;
     this.iface = iface;
   }
 
   @Override
-  public Class<T> getInterface() {
+  public Class<?> getInterface() {
     return iface;
   }
 
   @Override
-  public T getProxy() {
+  public Object getProxy() {
     return proxy;
   }
 
   @Override
-  public void performFailover(T currentProxy) {
+  public void performFailover(Object currentProxy) {
     // Nothing to do.
   }
 
-  @Override
-  public void close() throws IOException {
-    RPC.stopProxy(proxy);
-  }
-
 }

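After the generics are stripped, callers construct the provider with a raw Class<?>/Object pair and cast the proxy back themselves. A minimal sketch, assuming a hypothetical ClientProtocol interface and a proxy obtained elsewhere (e.g. from RPC.getProxy):

    import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
    import org.apache.hadoop.io.retry.FailoverProxyProvider;

    public class ProviderDemo {
      // Hypothetical RPC interface standing in for a real protocol.
      interface ClientProtocol { String ping(); }

      static ClientProtocol wrap(ClientProtocol proxy) {
        FailoverProxyProvider provider =
            new DefaultFailoverProxyProvider(ClientProtocol.class, proxy);
        // With the type parameter gone, the caller owns the unchecked cast.
        return (ClientProtocol) provider.getProxy();
      }
    }
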
+ 4 - 6
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/FailoverProxyProvider.java

@@ -17,8 +17,6 @@
  */
 package org.apache.hadoop.io.retry;
 
-import java.io.Closeable;
-
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
@@ -29,7 +27,7 @@ import org.apache.hadoop.classification.InterfaceStability;
  * {@link RetryPolicy}.
  */
 @InterfaceStability.Evolving
-public interface FailoverProxyProvider<T> extends Closeable {
+public interface FailoverProxyProvider {
 
   /**
    * Get the proxy object which should be used until the next failover event
@@ -37,7 +35,7 @@ public interface FailoverProxyProvider<T> extends Closeable {
    * 
    * @return the proxy object to invoke methods upon
    */
-  public T getProxy();
+  public Object getProxy();
 
   /**
    * Called whenever the associated {@link RetryPolicy} determines that an error
@@ -46,7 +44,7 @@ public interface FailoverProxyProvider<T> extends Closeable {
    * @param currentProxy the proxy object which was being used before this
    *        failover event
    */
-  public void performFailover(T currentProxy);
+  public void performFailover(Object currentProxy);
 
   /**
    * Return a reference to the interface this provider's proxy objects actually
@@ -58,5 +56,5 @@ public interface FailoverProxyProvider<T> extends Closeable {
    * @return the interface implemented by the proxy objects returned by
    *         {@link FailoverProxyProvider#getProxy()}
    */
-  public Class<T> getInterface();
+  public Class<?> getInterface();
 }

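To make the reduced contract concrete, here is an illustrative implementation of the non-generic interface: a toy provider that alternates between two pre-built proxies on each failover. Names and wiring are hypothetical; note there is no longer a close() method to implement:

    import org.apache.hadoop.io.retry.FailoverProxyProvider;

    public class FlipFlopProxyProvider implements FailoverProxyProvider {
      private final Class<?> iface;
      private final Object primary;
      private final Object standby;
      private Object current;

      public FlipFlopProxyProvider(Class<?> iface, Object primary,
          Object standby) {
        this.iface = iface;
        this.primary = primary;
        this.standby = standby;
        this.current = primary;
      }

      @Override
      public Object getProxy() { return current; }

      @Override
      public void performFailover(Object currentProxy) {
        current = (current == primary) ? standby : primary; // flip sides
      }

      @Override
      public Class<?> getInterface() { return iface; }
    }
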
+ 23 - 98
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryInvocationHandler.java

@@ -17,30 +17,19 @@
  */
 package org.apache.hadoop.io.retry;
 
-import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.InvocationTargetException;
 import java.lang.reflect.Method;
 import java.util.Collections;
 import java.util.Map;
-import java.util.concurrent.atomic.AtomicLong;
 
 import org.apache.commons.logging.Log;
 import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.io.retry.RetryPolicy.RetryAction;
-import org.apache.hadoop.util.ThreadUtil;
-import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.RPC;
-import org.apache.hadoop.ipc.RpcInvocationHandler;
 
-class RetryInvocationHandler implements RpcInvocationHandler {
+class RetryInvocationHandler implements InvocationHandler {
   public static final Log LOG = LogFactory.getLog(RetryInvocationHandler.class);
   private FailoverProxyProvider proxyProvider;
-
-  /**
-   * The number of times the associated proxyProvider has ever been failed over.
-   */
-  private long proxyProviderFailoverCount = 0;
-  private volatile boolean hasMadeASuccessfulCall = false;
   
   private RetryPolicy defaultPolicy;
   private Map<String,RetryPolicy> methodNameToPolicyMap;
@@ -69,94 +58,40 @@ class RetryInvocationHandler implements RpcInvocationHandler {
       policy = defaultPolicy;
     }
     
-    // The number of times this method invocation has been failed over.
-    int invocationFailoverCount = 0;
+    int failovers = 0;
     int retries = 0;
     while (true) {
-      // The number of times this invocation handler has ever been failed over,
-      // before this method invocation attempt. Used to prevent concurrent
-      // failed method invocations from triggering multiple failover attempts.
-      long invocationAttemptFailoverCount;
-      synchronized (proxyProvider) {
-        invocationAttemptFailoverCount = proxyProviderFailoverCount;
-      }
       try {
-        Object ret = invokeMethod(method, args);
-        hasMadeASuccessfulCall = true;
-        return ret;
+        return invokeMethod(method, args);
       } catch (Exception e) {
         boolean isMethodIdempotent = proxyProvider.getInterface()
             .getMethod(method.getName(), method.getParameterTypes())
            .isAnnotationPresent(Idempotent.class);
-        RetryAction action = policy.shouldRetry(e, retries++, invocationFailoverCount,
+        RetryAction action = policy.shouldRetry(e, retries++, failovers,
            isMethodIdempotent);
-        if (action.action == RetryAction.RetryDecision.FAIL) {
-          if (action.reason != null) {
-            LOG.warn("Exception while invoking " + 
-                currentProxy.getClass() + "." + method.getName() +
-                ". Not retrying because " + action.reason, e);
-          }
-          throw e;
-        } else { // retry or failover
-          // avoid logging the failover if this is the first call on this
-          // proxy object, and we successfully achieve the failover without
-          // any flip-flopping
-          boolean worthLogging = 
-            !(invocationFailoverCount == 0 && !hasMadeASuccessfulCall);
-          worthLogging |= LOG.isDebugEnabled();
-          if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY &&
-              worthLogging) {
-            String msg = "Exception while invoking " + method.getName()
-              + " of class " + currentProxy.getClass().getSimpleName();
-            if (invocationFailoverCount > 0) {
-              msg += " after " + invocationFailoverCount + " fail over attempts"; 
-            }
-            msg += ". Trying to fail over " + formatSleepMessage(action.delayMillis);
-            if (LOG.isDebugEnabled()) {
-              LOG.debug(msg, e);
-            } else {
-              LOG.warn(msg);
-            }
-          } else {
-            if(LOG.isDebugEnabled()) {
-              LOG.debug("Exception while invoking " + method.getName()
-                  + " of class " + currentProxy.getClass().getSimpleName() +
-                  ". Retrying " + formatSleepMessage(action.delayMillis), e);
-            }
-          }
-          
-          if (action.delayMillis > 0) {
-            ThreadUtil.sleepAtLeastIgnoreInterrupts(action.delayMillis);
-          }
-          
-          if (action.action == RetryAction.RetryDecision.FAILOVER_AND_RETRY) {
-            // Make sure that concurrent failed method invocations only cause a
-            // single actual fail over.
-            synchronized (proxyProvider) {
-              if (invocationAttemptFailoverCount == proxyProviderFailoverCount) {
-                proxyProvider.performFailover(currentProxy);
-                proxyProviderFailoverCount++;
-                currentProxy = proxyProvider.getProxy();
-              } else {
-                LOG.warn("A failover has occurred since the start of this method"
-                    + " invocation attempt.");
-              }
-            }
-            invocationFailoverCount++;
+        if (action == RetryAction.FAIL) {
+          LOG.warn("Exception while invoking " + method.getName()
+                   + " of " + currentProxy.getClass() + ". Not retrying.", e);
+          if (!method.getReturnType().equals(Void.TYPE)) {
+            throw e; // non-void methods can't fail without an exception
           }
+          return null;
+        } else if (action == RetryAction.FAILOVER_AND_RETRY) {
+          LOG.warn("Exception while invoking " + method.getName()
+              + " of " + currentProxy.getClass()
+              + ". Trying to fail over.", e);
+          failovers++;
+          proxyProvider.performFailover(currentProxy);
+          currentProxy = proxyProvider.getProxy();
+        }
+        if(LOG.isDebugEnabled()) {
+          LOG.debug("Exception while invoking " + method.getName()
+              + " of " + currentProxy.getClass() + ". Retrying.", e);
        }
      }
    }
  }
-  
-  private static String formatSleepMessage(long millis) {
-    if (millis > 0) {
-      return "after sleeping for " + millis + "ms.";
-    } else {
-      return "immediately.";
-    }
-  }
-  
+
   private Object invokeMethod(Method method, Object[] args) throws Throwable {
     try {
       if (!method.isAccessible()) {
@@ -168,14 +103,4 @@ class RetryInvocationHandler implements RpcInvocationHandler {
     }
   }
 
-  @Override
-  public void close() throws IOException {
-    proxyProvider.close();
-  }
-
-  @Override //RpcInvocationHandler
-  public ConnectionId getConnectionId() {
-    return RPC.getConnectionIdForProxy(currentProxy);
-  }
-
 }

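RetryInvocationHandler is package-private and is normally reached through RetryProxy, which wraps a raw proxy in this handler so every call funnels through policy.shouldRetry on failure. A minimal usage sketch, assuming a hypothetical Echo protocol:

    import java.io.IOException;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryProxy;

    public class RetryProxyDemo {
      // Hypothetical interface; any RPC protocol would do.
      interface Echo { String echo(String s) throws IOException; }

      static Echo withRetries(Echo rawProxy) {
        // RetryProxy.create returns a dynamic proxy backed by the
        // RetryInvocationHandler shown in the hunk above.
        return (Echo) RetryProxy.create(Echo.class, rawProxy,
            RetryPolicies.TRY_ONCE_THEN_FAIL);
      }
    }
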
+ 31 - 73
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicies.java

@@ -33,8 +33,6 @@ import org.apache.commons.logging.LogFactory;
 import org.apache.hadoop.ipc.RemoteException;
 import org.apache.hadoop.ipc.StandbyException;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /**
  * <p>
  * A collection of useful implementations of {@link RetryPolicy}.
@@ -44,8 +42,6 @@ public class RetryPolicies {
   
   public static final Log LOG = LogFactory.getLog(RetryPolicies.class);
   
-  private static final Random RAND = new Random();
-  
   /**
    * <p>
    * Try once, and fail by re-throwing the exception.
@@ -54,6 +50,14 @@ public class RetryPolicies {
    */
   public static final RetryPolicy TRY_ONCE_THEN_FAIL = new TryOnceThenFail();
   
+  /**
+   * <p>
+   * Try once, and fail silently for <code>void</code> methods, or by
+   * re-throwing the exception for non-<code>void</code> methods.
+   * </p>
+   */
+  public static final RetryPolicy TRY_ONCE_DONT_FAIL = new TryOnceDontFail();
+  
   /**
    * <p>
    * Keep trying forever.
@@ -133,17 +137,16 @@ public class RetryPolicies {
   
   public static final RetryPolicy failoverOnNetworkException(
       RetryPolicy fallbackPolicy, int maxFailovers) {
-    return failoverOnNetworkException(fallbackPolicy, maxFailovers, 0, 0);
-  }
-  
-  public static final RetryPolicy failoverOnNetworkException(
-      RetryPolicy fallbackPolicy, int maxFailovers, long delayMillis,
-      long maxDelayBase) {
-    return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers,
-        delayMillis, maxDelayBase);
+    return new FailoverOnNetworkExceptionRetry(fallbackPolicy, maxFailovers);
   }
   
   static class TryOnceThenFail implements RetryPolicy {
+    public RetryAction shouldRetry(Exception e, int retries, int failovers,
+        boolean isMethodIdempotent) throws Exception {
+      throw e;
+    }
+  }
+  static class TryOnceDontFail implements RetryPolicy {
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
         boolean isMethodIdempotent) throws Exception {
       return RetryAction.FAIL;
@@ -171,10 +174,14 @@ public class RetryPolicies {
     public RetryAction shouldRetry(Exception e, int retries, int failovers,
         boolean isMethodIdempotent) throws Exception {
       if (retries >= maxRetries) {
-        return RetryAction.FAIL;
+        throw e;
       }
-      return new RetryAction(RetryAction.RetryDecision.RETRY,
-          timeUnit.toMillis(calculateSleepTime(retries)));
+      try {
+        timeUnit.sleep(calculateSleepTime(retries));
+      } catch (InterruptedException ie) {
+        // retry
+      }
+      return RetryAction.RETRY;
     }
     
     protected abstract long calculateSleepTime(int retries);
@@ -261,7 +268,7 @@ public class RetryPolicies {
   }
   
   static class ExponentialBackoffRetry extends RetryLimited {
-    
+    private Random r = new Random();
     public ExponentialBackoffRetry(
         int maxRetries, long sleepTime, TimeUnit timeUnit) {
       super(maxRetries, sleepTime, timeUnit);
@@ -269,19 +276,16 @@ public class RetryPolicies {
     
     @Override
     protected long calculateSleepTime(int retries) {
-      return calculateExponentialTime(sleepTime, retries + 1);
+      return sleepTime*r.nextInt(1<<(retries+1));
     }
   }
   
-  /**
+  /*
    * Fail over and retry in the case of:
    *   Remote StandbyException (server is up, but is not the active server)
    *   Immediate socket exceptions (e.g. no route to host, econnrefused)
    *   Socket exceptions after initial connection when operation is idempotent
    * 
-   * The first failover is immediate, while all subsequent failovers wait an
-   * exponentially-increasing random amount of time.
-   * 
    * Fail immediately in the case of:
    *   Socket exceptions after initial connection when operation is not idempotent
   * 
@@ -291,49 +295,33 @@ public class RetryPolicies {
     
     private RetryPolicy fallbackPolicy;
     private int maxFailovers;
-    private long delayMillis;
-    private long maxDelayBase;
     
     public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
         int maxFailovers) {
-      this(fallbackPolicy, maxFailovers, 0, 0);
-    }
-    
-    public FailoverOnNetworkExceptionRetry(RetryPolicy fallbackPolicy,
-        int maxFailovers, long delayMillis, long maxDelayBase) {
       this.fallbackPolicy = fallbackPolicy;
       this.maxFailovers = maxFailovers;
-      this.delayMillis = delayMillis;
-      this.maxDelayBase = maxDelayBase;
     }
 
     @Override
     public RetryAction shouldRetry(Exception e, int retries,
         int failovers, boolean isMethodIdempotent) throws Exception {
       if (failovers >= maxFailovers) {
-        return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
-            "failovers (" + failovers + ") exceeded maximum allowed ("
+        LOG.info("Failovers (" + failovers + ") exceeded maximum allowed ("
             + maxFailovers + ")");
+        return RetryAction.FAIL;
       }
       
       if (e instanceof ConnectException ||
           e instanceof NoRouteToHostException ||
           e instanceof UnknownHostException ||
-          e instanceof StandbyException ||
-          isWrappedStandbyException(e)) {
-        return new RetryAction(
-            RetryAction.RetryDecision.FAILOVER_AND_RETRY,
-            // retry immediately if this is our first failover, sleep otherwise
-            failovers == 0 ? 0 :
-                calculateExponentialTime(delayMillis, failovers, maxDelayBase));
+          e instanceof StandbyException) {
+        return RetryAction.FAILOVER_AND_RETRY;
       } else if (e instanceof SocketException ||
-                 (e instanceof IOException && !(e instanceof RemoteException))) {
+                 e instanceof IOException) {
         if (isMethodIdempotent) {
          return RetryAction.FAILOVER_AND_RETRY;
         } else {
-          return new RetryAction(RetryAction.RetryDecision.FAIL, 0,
-              "the invoked method is not idempotent, and unable to determine " +
-              "whether it was invoked");
+          return RetryAction.FAIL;
         }
       } else {
         return fallbackPolicy.shouldRetry(e, retries, failovers,
@@ -342,34 +330,4 @@ public class RetryPolicies {
     }
     
   }
-
-  /**
-   * Return a value which is <code>time</code> increasing exponentially as a
-   * function of <code>retries</code>, +/- 0%-50% of that value, chosen
-   * randomly.
-   * 
-   * @param time the base amount of time to work with
-   * @param retries the number of retries that have so occurred so far
-   * @param cap value at which to cap the base sleep time
-   * @return an amount of time to sleep
-   */
-  @VisibleForTesting
-  public static long calculateExponentialTime(long time, int retries,
-      long cap) {
-    long baseTime = Math.min(time * ((long)1 << retries), cap);
-    return (long) (baseTime * (RAND.nextFloat() + 0.5));
-  }
-
-  private static long calculateExponentialTime(long time, int retries) {
-    return calculateExponentialTime(time, retries, Long.MAX_VALUE);
-  }
-  
-  private static boolean isWrappedStandbyException(Exception e) {
-    if (!(e instanceof RemoteException)) {
-      return false;
-    }
-    Exception unwrapped = ((RemoteException)e).unwrapRemoteException(
-        StandbyException.class);
-    return unwrapped instanceof StandbyException;
-  }
 }

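Under the restored contract, RetryLimited sleeps inside shouldRetry itself and the failover policy takes only a fallback policy and a failover cap. A short wiring sketch of the two restored factory methods (the counts and sleep time are arbitrary values for illustration):

    import java.util.concurrent.TimeUnit;
    import org.apache.hadoop.io.retry.RetryPolicies;
    import org.apache.hadoop.io.retry.RetryPolicy;

    public class PolicyWiringDemo {
      public static void main(String[] args) {
        // Up to 3 retries with a fixed 1s sleep between attempts (the sleep
        // now happens inside shouldRetry), capped at 2 failovers on top.
        RetryPolicy fallback = RetryPolicies
            .retryUpToMaximumCountWithFixedSleep(3, 1, TimeUnit.SECONDS);
        RetryPolicy policy =
            RetryPolicies.failoverOnNetworkException(fallback, 2);
        System.out.println(policy);
      }
    }
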
+ 5 - 33
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/io/retry/RetryPolicy.java

@@ -19,6 +19,7 @@ package org.apache.hadoop.io.retry;
 
 
 import org.apache.hadoop.classification.InterfaceStability;
 
+
 /**
  * <p>
  * Specifies a policy for retrying method failures.
@@ -32,39 +33,10 @@ public interface RetryPolicy {
    * Returned by {@link RetryPolicy#shouldRetry(Exception, int, int, boolean)}.
    */
   @InterfaceStability.Evolving
-  public static class RetryAction {
-    
-    // A few common retry policies, with no delays.
-    public static final RetryAction FAIL =
-        new RetryAction(RetryDecision.FAIL);
-    public static final RetryAction RETRY =
-        new RetryAction(RetryDecision.RETRY);
-    public static final RetryAction FAILOVER_AND_RETRY =
-        new RetryAction(RetryDecision.FAILOVER_AND_RETRY);
-    
-    public final RetryDecision action;
-    public final long delayMillis;
-    public final String reason;
-    
-    public RetryAction(RetryDecision action) {
-      this(action, 0, null);
-    }
-    
-    public RetryAction(RetryDecision action, long delayTime) {
-      this(action, delayTime, null);
-    }
-    
-    public RetryAction(RetryDecision action, long delayTime, String reason) {
-      this.action = action;
-      this.delayMillis = delayTime;
-      this.reason = reason;
-    }
-    
-    public enum RetryDecision {
-      FAIL,
-      RETRY,
-      FAILOVER_AND_RETRY
-    }
+  public enum RetryAction {
+    FAIL,
+    RETRY,
+    FAILOVER_AND_RETRY
   }
   
   /**

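With RetryAction reduced to a bare enum, a custom policy either returns one of the three constants or rethrows; there are no delayMillis/reason fields left to populate. An illustrative (hypothetical) policy under that contract:

    import org.apache.hadoop.io.retry.RetryPolicy;

    // Hypothetical policy: retry idempotent methods a bounded number of
    // times, fail everything else immediately by rethrowing.
    public class IdempotentOnlyRetry implements RetryPolicy {
      private final int maxRetries;

      public IdempotentOnlyRetry(int maxRetries) {
        this.maxRetries = maxRetries;
      }

      @Override
      public RetryAction shouldRetry(Exception e, int retries, int failovers,
          boolean isMethodIdempotent) throws Exception {
        if (!isMethodIdempotent || retries >= maxRetries) {
          throw e;
        }
        return RetryAction.RETRY;
      }
    }
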
+ 238 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroRpcEngine.java

@@ -0,0 +1,238 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+package org.apache.hadoop.ipc;
+
+import java.io.Closeable;
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+import java.lang.reflect.InvocationHandler;
+import java.lang.reflect.Method;
+import java.lang.reflect.Proxy;
+import java.net.InetSocketAddress;
+import java.nio.ByteBuffer;
+import java.util.ArrayList;
+import java.util.List;
+
+import javax.net.SocketFactory;
+
+import org.apache.avro.ipc.Responder;
+import org.apache.avro.ipc.Transceiver;
+import org.apache.avro.ipc.reflect.ReflectRequestor;
+import org.apache.avro.ipc.reflect.ReflectResponder;
+import org.apache.avro.ipc.specific.SpecificRequestor;
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.classification.InterfaceStability;
+import org.apache.hadoop.conf.Configuration;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.token.SecretManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
+
+/** Tunnel Avro-format RPC requests over a Hadoop {@link RPC} connection.  This
+ * does not give cross-language wire compatibility, since the Hadoop RPC wire
+ * format is non-standard, but it does permit use of Avro's protocol versioning
+ * features for inter-Java RPCs. */
+@InterfaceStability.Evolving
+public class AvroRpcEngine implements RpcEngine {
+  private static final Log LOG = LogFactory.getLog(RPC.class);
+
+  private static int VERSION = 0;
+
+  // the implementation we tunnel through
+  private static final RpcEngine ENGINE = new WritableRpcEngine();
+
+  /** Tunnel an Avro RPC request and response through Hadoop's RPC. */
+  private static interface TunnelProtocol extends VersionedProtocol {
+    //WritableRpcEngine expects a versionID in every protocol.
+    public static final long versionID = 0L;
+    /** All Avro methods and responses go through this. */
+    BufferListWritable call(BufferListWritable request) throws IOException;
+  }
+
+  /** A Writable that holds a List<ByteBuffer>, The Avro RPC Transceiver's
+   * basic unit of data transfer.*/
+  private static class BufferListWritable implements Writable {
+    private List<ByteBuffer> buffers;
+
+    public BufferListWritable() {}                // required for RPC Writables
+
+    public BufferListWritable(List<ByteBuffer> buffers) {
+      this.buffers = buffers;
+    }
+
+    public void readFields(DataInput in) throws IOException {
+      int size = in.readInt();
+      buffers = new ArrayList<ByteBuffer>(size);
+      for (int i = 0; i < size; i++) {
+        int length = in.readInt();
+        ByteBuffer buffer = ByteBuffer.allocate(length);
+        in.readFully(buffer.array(), 0, length);
+        buffers.add(buffer);
+      }
+    }
+  
+    public void write(DataOutput out) throws IOException {
+      out.writeInt(buffers.size());
+      for (ByteBuffer buffer : buffers) {
+        out.writeInt(buffer.remaining());
+        out.write(buffer.array(), buffer.position(), buffer.remaining());
+      }
+    }
+  }
+
+  /** An Avro RPC Transceiver that tunnels client requests through Hadoop
+   * RPC. */
+  private static class ClientTransceiver extends Transceiver {
+    private TunnelProtocol tunnel;
+    private InetSocketAddress remote;
+  
+    public ClientTransceiver(InetSocketAddress addr,
+                             UserGroupInformation ticket,
+                             Configuration conf, SocketFactory factory,
+                             int rpcTimeout)
+      throws IOException {
+      this.tunnel = ENGINE.getProxy(TunnelProtocol.class, VERSION,
+                                        addr, ticket, conf, factory,
+                                        rpcTimeout).getProxy();
+      this.remote = addr;
+    }
+
+    public String getRemoteName() { return remote.toString(); }
+
+    public List<ByteBuffer> transceive(List<ByteBuffer> request)
+      throws IOException {
+      return tunnel.call(new BufferListWritable(request)).buffers;
+    }
+
+    public List<ByteBuffer> readBuffers() throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    public void writeBuffers(List<ByteBuffer> buffers) throws IOException {
+      throw new UnsupportedOperationException();
+    }
+
+    public void close() throws IOException {
+      ENGINE.stopProxy(tunnel);
+    }
+  }
+
+  /** Construct a client-side proxy object that implements the named protocol,
+   * talking to a server at the named address. 
+   * @param <T>*/
+  @SuppressWarnings("unchecked")
+  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
+                         InetSocketAddress addr, UserGroupInformation ticket,
+                         Configuration conf, SocketFactory factory,
+                         int rpcTimeout)
+    throws IOException {
+    return new ProtocolProxy<T>(protocol,
+       (T)Proxy.newProxyInstance(
+         protocol.getClassLoader(),
+         new Class[] { protocol },
+         new Invoker(protocol, addr, ticket, conf, factory, rpcTimeout)),
+       false);
+  }
+
+  /** Stop this proxy. */
+  public void stopProxy(Object proxy) {
+    try {
+      ((Invoker)Proxy.getInvocationHandler(proxy)).close();
+    } catch (IOException e) {
+      LOG.warn("Error while stopping "+proxy, e);
+    }
+  }
+
+  private class Invoker implements InvocationHandler, Closeable {
+    private final ClientTransceiver tx;
+    private final SpecificRequestor requestor;
+    public Invoker(Class<?> protocol, InetSocketAddress addr,
+                   UserGroupInformation ticket, Configuration conf,
+                   SocketFactory factory,
+                   int rpcTimeout) throws IOException {
+      this.tx = new ClientTransceiver(addr, ticket, conf, factory, rpcTimeout);
+      this.requestor = createRequestor(protocol, tx);
+    }
+    @Override public Object invoke(Object proxy, Method method, Object[] args) 
+      throws Throwable {
+      return requestor.invoke(proxy, method, args);
+    }
+    public void close() throws IOException {
+      tx.close();
+    }
+  }
+
+  protected SpecificRequestor createRequestor(Class<?> protocol, 
+      Transceiver transeiver) throws IOException {
+    return new ReflectRequestor(protocol, transeiver);
+  }
+
+  protected Responder createResponder(Class<?> iface, Object impl) {
+    return new ReflectResponder(iface, impl);
+  }
+
+  /** An Avro RPC Responder that can process requests passed via Hadoop RPC. */
+  private class TunnelResponder implements TunnelProtocol {
+    private Responder responder;
+    public TunnelResponder(Class<?> iface, Object impl) {
+      responder = createResponder(iface, impl);
+    }
+
+    @Override
+    public long getProtocolVersion(String protocol, long version)
+    throws IOException {
+      return VERSION;
+    }
+
+    @Override
+    public ProtocolSignature getProtocolSignature(
+        String protocol, long version, int clientMethodsHashCode)
+      throws IOException {
+      return new ProtocolSignature(VERSION, null);
+    }
+
+    public BufferListWritable call(final BufferListWritable request)
+      throws IOException {
+      return new BufferListWritable(responder.respond(request.buffers));
+    }
+  }
+
+  public Object[] call(Method method, Object[][] params,
+                       InetSocketAddress[] addrs, UserGroupInformation ticket,
+                       Configuration conf) throws IOException {
+    throw new UnsupportedOperationException();
+  }
+
+  /** Construct a server for a protocol implementation instance listening on a
+   * port and address. */
+  public RPC.Server getServer(Class<?> iface, Object impl, String bindAddress,
+                              int port, int numHandlers, int numReaders,
+                              int queueSizePerHandler, boolean verbose,
+                              Configuration conf, 
+                       SecretManager<? extends TokenIdentifier> secretManager
+                              ) throws IOException {
+    return ENGINE.getServer(TunnelProtocol.class,
+                            new TunnelResponder(iface, impl),
+                            bindAddress, port, numHandlers, numReaders,
+                            queueSizePerHandler, verbose, conf, secretManager);
+  }
+
+}

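A protocol is routed to this engine through the per-protocol engine lookup that RPC performs. A hedged configuration sketch, assuming the "rpc.engine.<protocol class>" key consulted by this branch's RpcEngine plumbing and a hypothetical protocol interface:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.AvroRpcEngine;
    import org.apache.hadoop.ipc.RpcEngine;

    public class EngineSelectionDemo {
      // Hypothetical reflect-style protocol.
      interface AvroTestProtocol { void ping(); }

      static Configuration configure() {
        Configuration conf = new Configuration();
        // Route calls on AvroTestProtocol through AvroRpcEngine (assumed key
        // format; RPC resolves the engine class per protocol name).
        conf.setClass("rpc.engine." + AvroTestProtocol.class.getName(),
            AvroRpcEngine.class, RpcEngine.class);
        return conf;
      }
    }
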
+ 15 - 11
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ha/BadFencingConfigurationException.java → hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/AvroSpecificRpcEngine.java

@@ -15,27 +15,31 @@
  * See the License for the specific language governing permissions and
  * limitations under the License.
  */
-package org.apache.hadoop.ha;
+
+package org.apache.hadoop.ipc;
 
 import java.io.IOException;
 
-import org.apache.hadoop.classification.InterfaceAudience;
+import org.apache.avro.ipc.Responder;
+import org.apache.avro.ipc.Transceiver;
+import org.apache.avro.ipc.specific.SpecificRequestor;
+import org.apache.avro.ipc.specific.SpecificResponder;
 import org.apache.hadoop.classification.InterfaceStability;
 
 /**
- * Indicates that the operator has specified an invalid configuration
- * for fencing methods.
+ * AvroRpcEngine which uses Avro's "specific" APIs. The protocols generated 
+ * via Avro IDL needs to use this Engine.
  */
-@InterfaceAudience.Public
 @InterfaceStability.Evolving
-public class BadFencingConfigurationException extends IOException {
-  private static final long serialVersionUID = 1L;
+public class AvroSpecificRpcEngine extends AvroRpcEngine {
 
-  public BadFencingConfigurationException(String msg) {
-    super(msg);
+  protected SpecificRequestor createRequestor(Class<?> protocol, 
+      Transceiver transeiver) throws IOException {
+    return new SpecificRequestor(protocol, transeiver);
   }
 
-  public BadFencingConfigurationException(String msg, Throwable cause) {
-    super(msg, cause);
+  protected Responder createResponder(Class<?> iface, Object impl) {
+    return new SpecificResponder(iface, impl);
   }
+
 }

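Since this subclass only swaps the reflect requestor/responder for Avro's "specific" ones, selecting it follows the same per-protocol pattern; the generated class in the sketch below is hypothetical:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.ipc.AvroSpecificRpcEngine;
    import org.apache.hadoop.ipc.RpcEngine;

    public class SpecificEngineDemo {
      // For an interface generated from Avro IDL (hypothetical class).
      static void useSpecificEngine(Configuration conf, Class<?> generated) {
        conf.setClass("rpc.engine." + generated.getName(),
            AvroSpecificRpcEngine.class, RpcEngine.class);
      }
    }
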
+ 60 - 140
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Client.java

@@ -50,8 +50,6 @@ import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeys;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
 import org.apache.hadoop.fs.CommonConfigurationKeysPublic;
-import org.apache.hadoop.ipc.RpcPayloadHeader.*;
-import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.IOUtils;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Text;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.Writable;
@@ -67,7 +65,6 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.TokenSelector;
 import org.apache.hadoop.security.token.TokenInfo;
 import org.apache.hadoop.security.token.TokenInfo;
-import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.ReflectionUtils;
 
 
 /** A client for an IPC service.  IPC calls take a single {@link Writable} as a
 /** A client for an IPC service.  IPC calls take a single {@link Writable} as a
@@ -156,20 +153,16 @@ public class Client {
     return refCount==0;
     return refCount==0;
   }
   }
 
 
-  /** 
-   * Class that represents an RPC call
-   */
+  /** A call waiting for a value. */
   private class Call {
   private class Call {
-    final int id;               // call id
-    final Writable rpcRequest;  // the serialized rpc request - RpcPayload
-    Writable rpcResponse;       // null if rpc has error
-    IOException error;          // exception, null if success
-    final RpcKind rpcKind;      // Rpc EngineKind
-    boolean done;               // true when call is done
-
-    protected Call(RpcKind rpcKind, Writable param) {
-      this.rpcKind = rpcKind;
-      this.rpcRequest = param;
+    int id;                                       // call id
+    Writable param;                               // parameter
+    Writable value;                               // value, null if error
+    IOException error;                            // exception, null if value
+    boolean done;                                 // true when call is done
+
+    protected Call(Writable param) {
+      this.param = param;
       synchronized (Client.this) {
       synchronized (Client.this) {
         this.id = counter++;
         this.id = counter++;
       }
       }
@@ -195,15 +188,15 @@ public class Client {
     /** Set the return value when there is no error. 
     /** Set the return value when there is no error. 
      * Notify the caller the call is done.
      * Notify the caller the call is done.
      * 
      * 
-     * @param rpcResponse return value of the rpc call.
+     * @param value return value of the call.
      */
      */
-    public synchronized void setRpcResponse(Writable rpcResponse) {
-      this.rpcResponse = rpcResponse;
+    public synchronized void setValue(Writable value) {
+      this.value = value;
       callComplete();
       callComplete();
     }
     }
     
     
-    public synchronized Writable getRpcResult() {
-      return rpcResponse;
+    public synchronized Writable getValue() {
+      return value;
     }
     }
   }
   }
 
 
@@ -213,7 +206,7 @@ public class Client {
   private class Connection extends Thread {
   private class Connection extends Thread {
     private InetSocketAddress server;             // server ip:port
     private InetSocketAddress server;             // server ip:port
     private String serverPrincipal;  // server's krb5 principal name
     private String serverPrincipal;  // server's krb5 principal name
-    private IpcConnectionContextProto connectionContext;   // connection context
+    private ConnectionHeader header;              // connection header
     private final ConnectionId remoteId;                // connection id
     private final ConnectionId remoteId;                // connection id
     private AuthMethod authMethod; // authentication method
     private AuthMethod authMethod; // authentication method
     private boolean useSasl;
     private boolean useSasl;
@@ -227,8 +220,6 @@ public class Client {
     private int maxIdleTime; //connections will be culled if it was idle for 
     private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
     //maxIdleTime msecs
     private int maxRetries; //the max. no. of retries for socket connections
     private int maxRetries; //the max. no. of retries for socket connections
-    // the max. no. of retries for socket connections on time out exceptions
-    private int maxRetriesOnSocketTimeouts;
     private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
     private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
     private boolean doPing; //do we need to send ping message
     private boolean doPing; //do we need to send ping message
     private int pingInterval; // how often sends ping to the server in msecs
     private int pingInterval; // how often sends ping to the server in msecs
@@ -252,7 +243,6 @@ public class Client {
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.rpcTimeout = remoteId.getRpcTimeout();
       this.maxIdleTime = remoteId.getMaxIdleTime();
       this.maxIdleTime = remoteId.getMaxIdleTime();
       this.maxRetries = remoteId.getMaxRetries();
       this.maxRetries = remoteId.getMaxRetries();
-      this.maxRetriesOnSocketTimeouts = remoteId.getMaxRetriesOnSocketTimeouts();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.tcpNoDelay = remoteId.getTcpNoDelay();
       this.doPing = remoteId.getDoPing();
       this.doPing = remoteId.getDoPing();
       this.pingInterval = remoteId.getPingInterval();
       this.pingInterval = remoteId.getPingInterval();
@@ -297,8 +287,8 @@ public class Client {
         authMethod = AuthMethod.KERBEROS;
         authMethod = AuthMethod.KERBEROS;
       }
       }
       
       
-      connectionContext = ProtoUtil.makeIpcConnectionContext(
-          RPC.getProtocolName(protocol), ticket, authMethod);
+      header = new ConnectionHeader(protocol == null ? null : protocol
+          .getName(), ticket, authMethod);
       
       
       if (LOG.isDebugEnabled())
       if (LOG.isDebugEnabled())
         LOG.debug("Use " + authMethod + " authentication for protocol "
         LOG.debug("Use " + authMethod + " authentication for protocol "
@@ -481,8 +471,11 @@ public class Client {
           if (updateAddress()) {
           if (updateAddress()) {
             timeoutFailures = ioFailures = 0;
             timeoutFailures = ioFailures = 0;
           }
           }
-          handleConnectionFailure(timeoutFailures++,
-              maxRetriesOnSocketTimeouts, toe);
+          /*
+           * The max number of retries is 45, which amounts to 20s*45 = 15
+           * minutes retries.
+           */
+          handleConnectionFailure(timeoutFailures++, 45, toe);
         } catch (IOException ie) {
         } catch (IOException ie) {
           if (updateAddress()) {
           if (updateAddress()) {
             timeoutFailures = ioFailures = 0;
             timeoutFailures = ioFailures = 0;
@@ -565,7 +558,7 @@ public class Client {
           setupConnection();
           setupConnection();
           InputStream inStream = NetUtils.getInputStream(socket);
           InputStream inStream = NetUtils.getInputStream(socket);
           OutputStream outStream = NetUtils.getOutputStream(socket);
           OutputStream outStream = NetUtils.getOutputStream(socket);
-          writeConnectionHeader(outStream);
+          writeRpcHeader(outStream);
           if (useSasl) {
           if (useSasl) {
             final InputStream in2 = inStream;
             final InputStream in2 = inStream;
             final OutputStream out2 = outStream;
             final OutputStream out2 = outStream;
@@ -599,11 +592,8 @@ public class Client {
             } else {
             } else {
               // fall back to simple auth because server told us so.
               // fall back to simple auth because server told us so.
               authMethod = AuthMethod.SIMPLE;
               authMethod = AuthMethod.SIMPLE;
-              // remake the connectionContext             
-              connectionContext = ProtoUtil.makeIpcConnectionContext(
-                  connectionContext.getProtocol(), 
-                  ProtoUtil.getUgi(connectionContext.getUserInfo()),
-                  authMethod);
+              header = new ConnectionHeader(header.getProtocol(), header
+                  .getUgi(), authMethod);
               useSasl = false;
               useSasl = false;
             }
             }
           }
           }
@@ -683,26 +673,13 @@ public class Client {
           ". Already tried " + curRetries + " time(s).");
           ". Already tried " + curRetries + " time(s).");
     }
     }
 
 
-    /**
-     * Write the connection header - this is sent when connection is established
-     * +----------------------------------+
-     * |  "hrpc" 4 bytes                  |      
-     * +----------------------------------+
-     * |  Version (1 bytes)               |      
-     * +----------------------------------+
-     * |  Authmethod (1 byte)             |      
-     * +----------------------------------+
-     * |  IpcSerializationType (1 byte)   |      
-     * +----------------------------------+
-     */
-    private void writeConnectionHeader(OutputStream outStream)
-        throws IOException {
+    /* Write the RPC header */
+    private void writeRpcHeader(OutputStream outStream) throws IOException {
       DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
       DataOutputStream out = new DataOutputStream(new BufferedOutputStream(outStream));
       // Write out the header, version and authentication method
       // Write out the header, version and authentication method
       out.write(Server.HEADER.array());
       out.write(Server.HEADER.array());
       out.write(Server.CURRENT_VERSION);
       out.write(Server.CURRENT_VERSION);
       authMethod.write(out);
       authMethod.write(out);
-      Server.IpcSerializationType.PROTOBUF.write(out);
       out.flush();
       out.flush();
     }
     }
     
     
@@ -712,7 +689,7 @@ public class Client {
     private void writeHeader() throws IOException {
     private void writeHeader() throws IOException {
       // Write out the ConnectionHeader
       // Write out the ConnectionHeader
       DataOutputBuffer buf = new DataOutputBuffer();
       DataOutputBuffer buf = new DataOutputBuffer();
-      connectionContext.writeTo(buf);
+      header.write(buf);
       
       
       // Write out the payload length
       // Write out the payload length
       int bufLen = buf.getLength();
       int bufLen = buf.getLength();
@@ -751,7 +728,6 @@ public class Client {
       }
       }
     }
     }
 
 
-    @SuppressWarnings("unused")
     public InetSocketAddress getRemoteAddress() {
     public InetSocketAddress getRemoteAddress() {
       return server;
       return server;
     }
     }
@@ -813,10 +789,8 @@ public class Client {
           //data to be written
           //data to be written
           d = new DataOutputBuffer();
           d = new DataOutputBuffer();
           d.writeInt(0); // placeholder for data length
           d.writeInt(0); // placeholder for data length
-          RpcPayloadHeader header = new RpcPayloadHeader(
-              call.rpcKind, RpcPayloadOperation.RPC_FINAL_PAYLOAD, call.id);
-          header.write(d);
-          call.rpcRequest.write(d);
+          d.writeInt(call.id);
+          call.param.write(d);
           byte[] data = d.getData();
           byte[] data = d.getData();
           int dataLength = d.getLength() - 4;
           int dataLength = d.getLength() - 4;
           data[0] = (byte)((dataLength >>> 24) & 0xff);
           data[0] = (byte)((dataLength >>> 24) & 0xff);
@@ -856,7 +830,7 @@ public class Client {
         if (state == Status.SUCCESS.state) {
         if (state == Status.SUCCESS.state) {
           Writable value = ReflectionUtils.newInstance(valueClass, conf);
           Writable value = ReflectionUtils.newInstance(valueClass, conf);
           value.readFields(in);                 // read value
           value.readFields(in);                 // read value
-          call.setRpcResponse(value);
+          call.setValue(value);
           calls.remove(id);
           calls.remove(id);
         } else if (state == Status.ERROR.state) {
         } else if (state == Status.ERROR.state) {
           call.setException(new RemoteException(WritableUtils.readString(in),
           call.setException(new RemoteException(WritableUtils.readString(in),
@@ -940,7 +914,7 @@ public class Client {
     private int index;
     private int index;
     
     
     public ParallelCall(Writable param, ParallelResults results, int index) {
     public ParallelCall(Writable param, ParallelResults results, int index) {
-      super(RpcKind.RPC_WRITABLE, param);
+      super(param);
       this.results = results;
       this.results = results;
       this.index = index;
       this.index = index;
     }
     }
@@ -964,7 +938,7 @@ public class Client {
 
 
     /** Collect a result. */
     /** Collect a result. */
     public synchronized void callComplete(ParallelCall call) {
     public synchronized void callComplete(ParallelCall call) {
-      values[call.index] = call.getRpcResult();       // store the value
+      values[call.index] = call.getValue();       // store the value
       count++;                                    // count it
       count++;                                    // count it
       if (count == size)                          // if all values are in
       if (count == size)                          // if all values are in
         notify();                                 // then notify waiting caller
         notify();                                 // then notify waiting caller
@@ -1024,25 +998,15 @@ public class Client {
     }
     }
   }
   }
 
 
-  /**
-   * Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, ConnectionId)}
-   *  for RPC_BUILTIN
-   */
-  public Writable call(Writable param, InetSocketAddress address)
-  throws InterruptedException, IOException {
-    return call(RpcKind.RPC_BUILTIN, param, address);
-    
-  }
   /** Make a call, passing <code>param</code>, to the IPC server running at
   /** Make a call, passing <code>param</code>, to the IPC server running at
    * <code>address</code>, returning the value.  Throws exceptions if there are
    * <code>address</code>, returning the value.  Throws exceptions if there are
    * network problems or if the remote code threw an exception.
    * network problems or if the remote code threw an exception.
-   * @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
-   *  ConnectionId)} instead 
+   * @deprecated Use {@link #call(Writable, ConnectionId)} instead 
    */
    */
   @Deprecated
   @Deprecated
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress address)
+  public Writable call(Writable param, InetSocketAddress address)
   throws InterruptedException, IOException {
   throws InterruptedException, IOException {
-      return call(rpcKind, param, address, null);
+      return call(param, address, null);
   }
   }
   
   
   /** Make a call, passing <code>param</code>, to the IPC server running at
   /** Make a call, passing <code>param</code>, to the IPC server running at
@@ -1050,16 +1014,15 @@ public class Client {
    * the value.  
    * the value.  
    * Throws exceptions if there are network problems or if the remote code 
    * Throws exceptions if there are network problems or if the remote code 
    * threw an exception.
    * threw an exception.
-   * @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable, 
-   * ConnectionId)} instead 
+   * @deprecated Use {@link #call(Writable, ConnectionId)} instead 
    */
    */
   @Deprecated
   @Deprecated
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr, 
+  public Writable call(Writable param, InetSocketAddress addr, 
       UserGroupInformation ticket)  
       UserGroupInformation ticket)  
       throws InterruptedException, IOException {
       throws InterruptedException, IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, null, ticket, 0,
         conf);
         conf);
-    return call(rpcKind, param, remoteId);
+    return call(param, remoteId);
   }
   }
   
   
   /** Make a call, passing <code>param</code>, to the IPC server running at
   /** Make a call, passing <code>param</code>, to the IPC server running at
@@ -1068,34 +1031,18 @@ public class Client {
    * timeout, returning the value.  
    * timeout, returning the value.  
    * Throws exceptions if there are network problems or if the remote code 
    * Throws exceptions if there are network problems or if the remote code 
    * threw an exception. 
    * threw an exception. 
-   * @deprecated Use {@link #call(RpcPayloadHeader.RpcKind, Writable,
-   *  ConnectionId)} instead 
+   * @deprecated Use {@link #call(Writable, ConnectionId)} instead 
    */
    */
   @Deprecated
   @Deprecated
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr, 
+  public Writable call(Writable param, InetSocketAddress addr, 
                        Class<?> protocol, UserGroupInformation ticket,
                        Class<?> protocol, UserGroupInformation ticket,
                        int rpcTimeout)  
                        int rpcTimeout)  
                        throws InterruptedException, IOException {
                        throws InterruptedException, IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
-    return call(rpcKind, param, remoteId);
+    return call(param, remoteId);
   }
   }
 
 
-  
-  /**
-   * Same as {@link #call(RpcPayloadHeader.RpcKind, Writable, InetSocketAddress, 
-   * Class, UserGroupInformation, int, Configuration)}
-   * except that rpcKind is writable.
-   */
-  public Writable call(Writable param, InetSocketAddress addr, 
-      Class<?> protocol, UserGroupInformation ticket,
-      int rpcTimeout, Configuration conf)  
-      throws InterruptedException, IOException {
-        ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
-        ticket, rpcTimeout, conf);
-    return call(RpcKind.RPC_BUILTIN, param, remoteId);
-  }
-  
   /**
   /**
    * Make a call, passing <code>param</code>, to the IPC server running at
    * Make a call, passing <code>param</code>, to the IPC server running at
    * <code>address</code> which is servicing the <code>protocol</code> protocol,
    * <code>address</code> which is servicing the <code>protocol</code> protocol,
@@ -1104,38 +1051,22 @@ public class Client {
    * value. Throws exceptions if there are network problems or if the remote
    * value. Throws exceptions if there are network problems or if the remote
    * code threw an exception.
    * code threw an exception.
    */
    */
-  public Writable call(RpcKind rpcKind, Writable param, InetSocketAddress addr, 
+  public Writable call(Writable param, InetSocketAddress addr, 
                        Class<?> protocol, UserGroupInformation ticket,
                        Class<?> protocol, UserGroupInformation ticket,
                        int rpcTimeout, Configuration conf)  
                        int rpcTimeout, Configuration conf)  
                        throws InterruptedException, IOException {
                        throws InterruptedException, IOException {
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
     ConnectionId remoteId = ConnectionId.getConnectionId(addr, protocol,
         ticket, rpcTimeout, conf);
         ticket, rpcTimeout, conf);
-    return call(rpcKind, param, remoteId);
+    return call(param, remoteId);
   }
   }
   
   
-  /**
-   * Same as {link {@link #call(RpcPayloadHeader.RpcKind, Writable, ConnectionId)}
-   * except the rpcKind is RPC_BUILTIN
-   */
+  /** Make a call, passing <code>param</code>, to the IPC server defined by
+   * <code>remoteId</code>, returning the value.  
+   * Throws exceptions if there are network problems or if the remote code 
+   * threw an exception. */
   public Writable call(Writable param, ConnectionId remoteId)  
   public Writable call(Writable param, ConnectionId remoteId)  
       throws InterruptedException, IOException {
       throws InterruptedException, IOException {
-     return call(RpcKind.RPC_BUILTIN, param, remoteId);
-  }
-  
-  /** 
-   * Make a call, passing <code>rpcRequest</code>, to the IPC server defined by
-   * <code>remoteId</code>, returning the rpc respond.
-   * 
-   * @param rpcKind
-   * @param rpcRequest -  contains serialized method and method parameters
-   * @param remoteId - the target rpc server
-   * @returns the rpc response
-   * Throws exceptions if there are network problems or if the remote code 
-   * threw an exception.
-   */
-  public Writable call(RpcKind rpcKind, Writable rpcRequest,
-      ConnectionId remoteId) throws InterruptedException, IOException {
-    Call call = new Call(rpcKind, rpcRequest);
+    Call call = new Call(param);
     Connection connection = getConnection(remoteId, call);
     Connection connection = getConnection(remoteId, call);
     connection.sendParam(call);                 // send the parameter
     connection.sendParam(call);                 // send the parameter
     boolean interrupted = false;
     boolean interrupted = false;
@@ -1167,7 +1098,7 @@ public class Client {
                   call.error);
                   call.error);
         }
         }
       } else {
       } else {
-        return call.rpcResponse;
+        return call.value;
       }
       }
     }
     }
   }
   }
@@ -1279,24 +1210,22 @@ public class Client {
   public static class ConnectionId {
   public static class ConnectionId {
     InetSocketAddress address;
     InetSocketAddress address;
     UserGroupInformation ticket;
     UserGroupInformation ticket;
-    final Class<?> protocol;
+    Class<?> protocol;
     private static final int PRIME = 16777619;
     private static final int PRIME = 16777619;
-    private final int rpcTimeout;
-    private final String serverPrincipal;
-    private final int maxIdleTime; //connections will be culled if it was idle for 
+    private int rpcTimeout;
+    private String serverPrincipal;
+    private int maxIdleTime; //connections will be culled if it was idle for 
     //maxIdleTime msecs
-    private final int maxRetries; //the max. no. of retries for socket connections
-    // the max. no. of retries for socket connections on time out exceptions
-    private final int maxRetriesOnSocketTimeouts;
-    private final boolean tcpNoDelay; // if T then disable Nagle's Algorithm
-    private final boolean doPing; //do we need to send ping message
-    private final int pingInterval; // how often sends ping to the server in msecs
+    private int maxRetries; //the max. no. of retries for socket connections
+    private boolean tcpNoDelay; // if T then disable Nagle's Algorithm
+    private boolean doPing; //do we need to send ping message
+    private int pingInterval; // how often sends ping to the server in msecs
     
     ConnectionId(InetSocketAddress address, Class<?> protocol, 
                  UserGroupInformation ticket, int rpcTimeout,
                  String serverPrincipal, int maxIdleTime, 
-                 int maxRetries, int maxRetriesOnSocketTimeouts,
-                 boolean tcpNoDelay, boolean doPing, int pingInterval) {
+                 int maxRetries, boolean tcpNoDelay,
+                 boolean doPing, int pingInterval) {
       this.protocol = protocol;
       this.address = address;
       this.ticket = ticket;
@@ -1304,7 +1233,6 @@ public class Client {
       this.serverPrincipal = serverPrincipal;
       this.maxIdleTime = maxIdleTime;
       this.maxRetries = maxRetries;
-      this.maxRetriesOnSocketTimeouts = maxRetriesOnSocketTimeouts;
       this.tcpNoDelay = tcpNoDelay;
       this.doPing = doPing;
       this.pingInterval = pingInterval;
@@ -1338,11 +1266,6 @@ public class Client {
       return maxRetries;
     }
     
-    /** max connection retries on socket time outs */
-    public int getMaxRetriesOnSocketTimeouts() {
-      return maxRetriesOnSocketTimeouts;
-    }
-    
     boolean getTcpNoDelay() {
       return tcpNoDelay;
     }
@@ -1377,9 +1300,6 @@ public class Client {
               CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_DEFAULT),
           conf.getInt(CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY,
               CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_DEFAULT),
-          conf.getInt(
-            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_KEY,
-            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_ON_SOCKET_TIMEOUTS_DEFAULT),
           conf.getBoolean(CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY,
               CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_DEFAULT),
           doPing, 
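
The restored ConnectionId above pulls its socket behaviour straight from Configuration. A minimal client-side sketch, assuming the usual CommonConfigurationKeysPublic constants, of tuning those same keys before any proxy is created (the values are illustrative, not recommended defaults):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.CommonConfigurationKeysPublic;

    public class IpcClientTuning {
      public static Configuration tunedConf() {
        Configuration conf = new Configuration();
        // cull idle connections after 5 seconds (illustrative value)
        conf.setInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECTION_MAXIDLETIME_KEY, 5000);
        // retry socket connections up to 20 times (illustrative value)
        conf.setInt(
            CommonConfigurationKeysPublic.IPC_CLIENT_CONNECT_MAX_RETRIES_KEY, 20);
        // disable Nagle's algorithm for lower per-call latency
        conf.setBoolean(
            CommonConfigurationKeysPublic.IPC_CLIENT_TCPNODELAY_KEY, true);
        return conf;
      }
    }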

+ 121 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ConnectionHeader.java

@@ -0,0 +1,121 @@
+/**
+ * Licensed to the Apache Software Foundation (ASF) under one
+ * or more contributor license agreements.  See the NOTICE file
+ * distributed with this work for additional information
+ * regarding copyright ownership.  The ASF licenses this file
+ * to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance
+ * with the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.hadoop.ipc;
+
+import java.io.DataInput;
+import java.io.DataOutput;
+import java.io.IOException;
+
+import org.apache.commons.logging.Log;
+import org.apache.commons.logging.LogFactory;
+import org.apache.hadoop.io.Text;
+import org.apache.hadoop.io.Writable;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+
+/**
+ * The IPC connection header sent by the client to the server
+ * on connection establishment.
+ */
+class ConnectionHeader implements Writable {
+  public static final Log LOG = LogFactory.getLog(ConnectionHeader.class);
+  
+  private String protocol;
+  private UserGroupInformation ugi = null;
+  private AuthMethod authMethod;
+  
+  public ConnectionHeader() {}
+  
+  /**
+   * Create a new {@link ConnectionHeader} with the given <code>protocol</code>
+   * and {@link UserGroupInformation}. 
+   * @param protocol protocol used for communication between the IPC client
+   *                 and the server
+   * @param ugi {@link UserGroupInformation} of the client communicating with
+   *            the server
+   */
+  public ConnectionHeader(String protocol, UserGroupInformation ugi, AuthMethod authMethod) {
+    this.protocol = protocol;
+    this.ugi = ugi;
+    this.authMethod = authMethod;
+  }
+
+  @Override
+  public void readFields(DataInput in) throws IOException {
+    protocol = Text.readString(in);
+    if (protocol.isEmpty()) {
+      protocol = null;
+    }
+    
+    boolean ugiUsernamePresent = in.readBoolean();
+    if (ugiUsernamePresent) {
+      String username = in.readUTF();
+      boolean realUserNamePresent = in.readBoolean();
+      if (realUserNamePresent) {
+        String realUserName = in.readUTF();
+        UserGroupInformation realUserUgi = UserGroupInformation
+            .createRemoteUser(realUserName);
+        ugi = UserGroupInformation.createProxyUser(username, realUserUgi);
+      } else {
+      	ugi = UserGroupInformation.createRemoteUser(username);
+      }
+    } else {
+      ugi = null;
+    }
+  }
+
+  @Override
+  public void write(DataOutput out) throws IOException {
+    Text.writeString(out, (protocol == null) ? "" : protocol);
+    if (ugi != null) {
+      if (authMethod == AuthMethod.KERBEROS) {
+        // Send effective user for Kerberos auth
+        out.writeBoolean(true);
+        out.writeUTF(ugi.getUserName());
+        out.writeBoolean(false);
+      } else if (authMethod == AuthMethod.DIGEST) {
+        // Don't send user for token auth
+        out.writeBoolean(false);
+      } else {
+        //Send both effective user and real user for simple auth
+        out.writeBoolean(true);
+        out.writeUTF(ugi.getUserName());
+        if (ugi.getRealUser() != null) {
+          out.writeBoolean(true);
+          out.writeUTF(ugi.getRealUser().getUserName());
+        } else {
+          out.writeBoolean(false);
+        }
+      }
+    } else {
+      out.writeBoolean(false);
+    }
+  }
+
+  public String getProtocol() {
+    return protocol;
+  }
+
+  public UserGroupInformation getUgi() {
+    return ugi;
+  }
+
+  public String toString() {
+    return protocol + "-" + ugi;
+  }
+}
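
The header framing above is plain DataOutput: a protocol string, then presence flags and UTF names for the effective and, in the proxy-user case, real user. A standalone round-trip sketch of the same shape, with java.io's writeUTF standing in for Hadoop's Text.writeString (so the bytes differ from the real wire format):

    import java.io.*;

    public class HeaderFramingDemo {
      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        DataOutputStream out = new DataOutputStream(buf);
        out.writeUTF("org.example.SomeProtocol"); // hypothetical protocol name
        out.writeBoolean(true);                   // effective user present
        out.writeUTF("alice");
        out.writeBoolean(true);                   // real user present (proxy case)
        out.writeUTF("oozie");

        DataInputStream in = new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray()));
        System.out.println(in.readUTF());         // protocol name
        if (in.readBoolean()) {
          String effective = in.readUTF();
          String real = in.readBoolean() ? in.readUTF() : null;
          System.out.println(effective + " via " + real); // alice via oozie
        }
      }
    }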

+ 0 - 49
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufHelper.java

@@ -1,49 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-import com.google.protobuf.ServiceException;
-
-/**
- * Helper methods for protobuf related RPC implementation
- */
-@InterfaceAudience.Private
-public class ProtobufHelper {
-  private ProtobufHelper() {
-    // Hidden constructor for class with only static helper methods
-  }
-
-  /**
-   * Return the IOException thrown by the remote server wrapped in 
-   * ServiceException as cause.
-   * @param se ServiceException that wraps IO exception thrown by the server
-   * @return Exception wrapped in ServiceException or
-   *         a new IOException that wraps the unexpected ServiceException.
-   */
-  public static IOException getRemoteException(ServiceException se) {
-    Throwable e = se.getCause();
-    if (e == null) {
-      return new IOException(se);
-    }
-    return e instanceof IOException ? (IOException) e : new IOException(se);
-  }
-}
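
Callers of protobuf stubs were expected to push every ServiceException through the helper removed above. A sketch of that unwrapping pattern, with the stub call itself elided:

    import java.io.IOException;
    import com.google.protobuf.ServiceException;

    public class UnwrapDemo {
      // same logic as the removed getRemoteException, reproduced for illustration
      static IOException unwrap(ServiceException se) {
        Throwable cause = se.getCause();
        if (cause instanceof IOException) {
          return (IOException) cause;   // server-thrown IOException: surface as-is
        }
        return new IOException(se);     // anything else: wrap the ServiceException
      }

      public static void main(String[] args) {
        ServiceException se = new ServiceException(new IOException("remote failure"));
        System.out.println(unwrap(se).getMessage()); // remote failure
      }
    }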

+ 0 - 437
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtobufRpcEngine.java

@@ -1,437 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-package org.apache.hadoop.ipc;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.net.InetSocketAddress;
-import java.util.Map;
-import java.util.concurrent.ConcurrentHashMap;
-
-import javax.net.SocketFactory;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.io.DataOutputOutputStream;
-import org.apache.hadoop.io.Writable;
-import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.RPC.RpcInvoker;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-
-import org.apache.hadoop.ipc.protobuf.HadoopRpcProtos.HadoopRpcRequestProto;
-import org.apache.hadoop.security.UserGroupInformation;
-import org.apache.hadoop.security.token.SecretManager;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.ProtoUtil;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.protobuf.BlockingService;
-import com.google.protobuf.Descriptors.MethodDescriptor;
-import com.google.protobuf.Message;
-import com.google.protobuf.ServiceException;
-
-/**
- * RPC Engine for protobuf based RPCs.
- */
-@InterfaceStability.Evolving
-public class ProtobufRpcEngine implements RpcEngine {
-  private static final Log LOG = LogFactory.getLog(ProtobufRpcEngine.class);
-  
-  static { // Register the rpcRequest deserializer for WritableRpcEngine 
-    org.apache.hadoop.ipc.Server.registerProtocolEngine(
-        RpcKind.RPC_PROTOCOL_BUFFER, RpcRequestWritable.class,
-        new Server.ProtoBufRpcInvoker());
-  }
-
-  private static final ClientCache CLIENTS = new ClientCache();
-
-  @Override
-  @SuppressWarnings("unchecked")
-  public <T> ProtocolProxy<T> getProxy(Class<T> protocol, long clientVersion,
-      InetSocketAddress addr, UserGroupInformation ticket, Configuration conf,
-      SocketFactory factory, int rpcTimeout) throws IOException {
-
-    return new ProtocolProxy<T>(protocol, (T) Proxy.newProxyInstance(protocol
-        .getClassLoader(), new Class[] { protocol }, new Invoker(protocol,
-        addr, ticket, conf, factory, rpcTimeout)), false);
-  }
-  
-  @Override
-  public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
-      ConnectionId connId, Configuration conf, SocketFactory factory)
-      throws IOException {
-    Class<ProtocolMetaInfoPB> protocol = ProtocolMetaInfoPB.class;
-    return new ProtocolProxy<ProtocolMetaInfoPB>(protocol,
-        (ProtocolMetaInfoPB) Proxy.newProxyInstance(protocol.getClassLoader(),
-            new Class[] { protocol }, new Invoker(protocol, connId, conf,
-                factory)), false);
-  }
-
-  private static class Invoker implements RpcInvocationHandler {
-    private final Map<String, Message> returnTypes = 
-        new ConcurrentHashMap<String, Message>();
-    private boolean isClosed = false;
-    private final Client.ConnectionId remoteId;
-    private final Client client;
-    private final long clientProtocolVersion;
-    private final String protocolName;
-
-    public Invoker(Class<?> protocol, InetSocketAddress addr,
-        UserGroupInformation ticket, Configuration conf, SocketFactory factory,
-        int rpcTimeout) throws IOException {
-      this(protocol, Client.ConnectionId.getConnectionId(addr, protocol,
-          ticket, rpcTimeout, conf), conf, factory);
-    }
-    
-    /**
-     * This constructor takes a connectionId, instead of creating a new one.
-     */
-    public Invoker(Class<?> protocol, Client.ConnectionId connId,
-        Configuration conf, SocketFactory factory) {
-      this.remoteId = connId;
-      this.client = CLIENTS.getClient(conf, factory, RpcResponseWritable.class);
-      this.protocolName = RPC.getProtocolName(protocol);
-      this.clientProtocolVersion = RPC
-          .getProtocolVersion(protocol);
-    }
-
-    private HadoopRpcRequestProto constructRpcRequest(Method method,
-        Object[] params) throws ServiceException {
-      HadoopRpcRequestProto rpcRequest;
-      HadoopRpcRequestProto.Builder builder = HadoopRpcRequestProto
-          .newBuilder();
-      builder.setMethodName(method.getName());
-
-      if (params.length != 2) { // RpcController + Message
-        throw new ServiceException("Too many parameters for request. Method: ["
-            + method.getName() + "]" + ", Expected: 2, Actual: "
-            + params.length);
-      }
-      if (params[1] == null) {
-        throw new ServiceException("null param while calling Method: ["
-            + method.getName() + "]");
-      }
-
-      Message param = (Message) params[1];
-      builder.setRequest(param.toByteString());
-      // For protobuf, {@code protocol} used when creating client side proxy is
-      // the interface extending BlockingInterface, which has the annotations 
-      // such as ProtocolName etc.
-      //
-      // Using Method.getDeclaringClass(), as in WritableEngine to get at
-      // the protocol interface will return BlockingInterface, from where 
-      // the annotation ProtocolName and Version cannot be
-      // obtained.
-      //
-      // Hence we simply use the protocol class used to create the proxy.
-      // For PB this may limit the use of mixins on client side.
-      builder.setDeclaringClassProtocolName(protocolName);
-      builder.setClientProtocolVersion(clientProtocolVersion);
-      rpcRequest = builder.build();
-      return rpcRequest;
-    }
-
-    /**
-     * This is the client side invoker of RPC method. It only throws
-     * ServiceException, since the invocation proxy expects only
-     * ServiceException to be thrown by the method in case protobuf service.
-     * 
-     * ServiceException has the following causes:
-     * <ol>
-     * <li>Exceptions encountered on the client side in this method are 
-     * set as cause in ServiceException as is.</li>
-     * <li>Exceptions from the server are wrapped in RemoteException and are
-     * set as cause in ServiceException</li>
-     * </ol>
-     * 
-     * Note that the client calling protobuf RPC methods, must handle
-     * ServiceException by getting the cause from the ServiceException. If the
-     * cause is RemoteException, then unwrap it to get the exception thrown by
-     * the server.
-     */
-    @Override
-    public Object invoke(Object proxy, Method method, Object[] args)
-        throws ServiceException {
-      long startTime = 0;
-      if (LOG.isDebugEnabled()) {
-        startTime = System.currentTimeMillis();
-      }
-
-      HadoopRpcRequestProto rpcRequest = constructRpcRequest(method, args);
-      RpcResponseWritable val = null;
-      try {
-        val = (RpcResponseWritable) client.call(RpcKind.RPC_PROTOCOL_BUFFER,
-            new RpcRequestWritable(rpcRequest), remoteId);
-      } catch (Throwable e) {
-        throw new ServiceException(e);
-      }
-
-      if (LOG.isDebugEnabled()) {
-        long callTime = System.currentTimeMillis() - startTime;
-        LOG.debug("Call: " + method.getName() + " " + callTime);
-      }
-      
-      Message prototype = null;
-      try {
-        prototype = getReturnProtoType(method);
-      } catch (Exception e) {
-        throw new ServiceException(e);
-      }
-      Message returnMessage;
-      try {
-        returnMessage = prototype.newBuilderForType()
-            .mergeFrom(val.responseMessage).build();
-      } catch (Throwable e) {
-        throw new ServiceException(e);
-      }
-      return returnMessage;
-    }
-
-    public void close() throws IOException {
-      if (!isClosed) {
-        isClosed = true;
-        CLIENTS.stopClient(client);
-      }
-    }
-
-    private Message getReturnProtoType(Method method) throws Exception {
-      if (returnTypes.containsKey(method.getName())) {
-        return returnTypes.get(method.getName());
-      }
-      
-      Class<?> returnType = method.getReturnType();
-      Method newInstMethod = returnType.getMethod("getDefaultInstance");
-      newInstMethod.setAccessible(true);
-      Message prototype = (Message) newInstMethod.invoke(null, (Object[]) null);
-      returnTypes.put(method.getName(), prototype);
-      return prototype;
-    }
-
-    @Override //RpcInvocationHandler
-    public ConnectionId getConnectionId() {
-      return remoteId;
-    }
-  }
-
-  @Override
-  public Object[] call(Method method, Object[][] params,
-      InetSocketAddress[] addrs, UserGroupInformation ticket, Configuration conf) {
-    throw new UnsupportedOperationException();
-  }
-
-  /**
-   * Writable Wrapper for Protocol Buffer Requests
-   */
-  private static class RpcRequestWritable implements Writable {
-    HadoopRpcRequestProto message;
-
-    @SuppressWarnings("unused")
-    public RpcRequestWritable() {
-    }
-
-    RpcRequestWritable(HadoopRpcRequestProto message) {
-      this.message = message;
-    }
-
-    @Override
-    public void write(DataOutput out) throws IOException {
-      ((Message)message).writeDelimitedTo(
-          DataOutputOutputStream.constructOutputStream(out));
-    }
-
-    @Override
-    public void readFields(DataInput in) throws IOException {
-      int length = ProtoUtil.readRawVarint32(in);
-      byte[] bytes = new byte[length];
-      in.readFully(bytes);
-      message = HadoopRpcRequestProto.parseFrom(bytes);
-    }
-  }
-
-  /**
-   * Writable Wrapper for Protocol Buffer Responses
-   */
-  private static class RpcResponseWritable implements Writable {
-    byte[] responseMessage;
-
-    @SuppressWarnings("unused")
-    public RpcResponseWritable() {
-    }
-
-    public RpcResponseWritable(Message message) {
-      this.responseMessage = message.toByteArray();
-    }
-
-    @Override
-    public void write(DataOutput out) throws IOException {
-      out.writeInt(responseMessage.length);
-      out.write(responseMessage);     
-    }
-
-    @Override
-    public void readFields(DataInput in) throws IOException {
-      int length = in.readInt();
-      byte[] bytes = new byte[length];
-      in.readFully(bytes);
-      responseMessage = bytes;
-    }
-  }
-
-  @VisibleForTesting
-  @InterfaceAudience.Private
-  @InterfaceStability.Unstable
-  static Client getClient(Configuration conf) {
-    return CLIENTS.getClient(conf, SocketFactory.getDefault(),
-        RpcResponseWritable.class);
-  }
-  
- 
-
-  @Override
-  public RPC.Server getServer(Class<?> protocol, Object protocolImpl,
-      String bindAddress, int port, int numHandlers, int numReaders,
-      int queueSizePerHandler, boolean verbose, Configuration conf,
-      SecretManager<? extends TokenIdentifier> secretManager)
-      throws IOException {
-    return new Server(protocol, protocolImpl, conf, bindAddress, port,
-        numHandlers, numReaders, queueSizePerHandler, verbose, secretManager);
-  }
-  
-  public static class Server extends RPC.Server {
-    /**
-     * Construct an RPC server.
-     * 
-     * @param protocolClass the class of protocol
-     * @param protocolImpl the protocolImpl whose methods will be called
-     * @param conf the configuration to use
-     * @param bindAddress the address to bind on to listen for connection
-     * @param port the port to listen for connections on
-     * @param numHandlers the number of method handler threads to run
-     * @param verbose whether each call should be logged
-     */
-    public Server(Class<?> protocolClass, Object protocolImpl,
-        Configuration conf, String bindAddress, int port, int numHandlers,
-        int numReaders, int queueSizePerHandler, boolean verbose,
-        SecretManager<? extends TokenIdentifier> secretManager)
-        throws IOException {
-      super(bindAddress, port, null, numHandlers,
-          numReaders, queueSizePerHandler, conf, classNameBase(protocolImpl
-              .getClass().getName()), secretManager);
-      this.verbose = verbose;  
-      registerProtocolAndImpl(RpcKind.RPC_PROTOCOL_BUFFER, protocolClass,
-          protocolImpl);
-    }
-    
-    /**
-     * Protobuf invoker for {@link RpcInvoker}
-     */
-    static class ProtoBufRpcInvoker implements RpcInvoker {
-      private static ProtoClassProtoImpl getProtocolImpl(RPC.Server server,
-          String protoName, long version) throws IOException {
-        ProtoNameVer pv = new ProtoNameVer(protoName, version);
-        ProtoClassProtoImpl impl = 
-            server.getProtocolImplMap(RpcKind.RPC_PROTOCOL_BUFFER).get(pv);
-        if (impl == null) { // no match for Protocol AND Version
-          VerProtocolImpl highest = 
-              server.getHighestSupportedProtocol(RpcKind.RPC_PROTOCOL_BUFFER, 
-                  protoName);
-          if (highest == null) {
-            throw new IOException("Unknown protocol: " + protoName);
-          }
-          // protocol supported but not the version that client wants
-          throw new RPC.VersionMismatch(protoName, version,
-              highest.version);
-        }
-        return impl;
-      }
-
-      @Override 
-      /**
-       * This is a server side method, which is invoked over RPC. On success
-       * the return response has protobuf response payload. On failure, the
-       * exception name and the stack trace are returned in the response.
-       * See {@link HadoopRpcResponseProto}
-       * 
-       * In this method there are three types of exceptions possible and they are
-       * returned in response as follows.
-       * <ol>
-       * <li> Exceptions encountered in this method that are returned 
-       * as {@link RpcServerException} </li>
-       * <li> Exceptions thrown by the service is wrapped in ServiceException. 
-       * In that case this method returns in the response the exception thrown by the 
-       * service.</li>
-       * <li> Other exceptions thrown by the service. They are returned as
-       * it is.</li>
-       * </ol>
-       */
-      public Writable call(RPC.Server server, String protocol,
-          Writable writableRequest, long receiveTime) throws Exception {
-        RpcRequestWritable request = (RpcRequestWritable) writableRequest;
-        HadoopRpcRequestProto rpcRequest = request.message;
-        String methodName = rpcRequest.getMethodName();
-        String protoName = rpcRequest.getDeclaringClassProtocolName();
-        long clientVersion = rpcRequest.getClientProtocolVersion();
-        if (server.verbose)
-          LOG.info("Call: protocol=" + protocol + ", method=" + methodName);
-        
-        ProtoClassProtoImpl protocolImpl = getProtocolImpl(server, protoName,
-            clientVersion);
-        BlockingService service = (BlockingService) protocolImpl.protocolImpl;
-        MethodDescriptor methodDescriptor = service.getDescriptorForType()
-            .findMethodByName(methodName);
-        if (methodDescriptor == null) {
-          String msg = "Unknown method " + methodName + " called on " + protocol
-              + " protocol.";
-          LOG.warn(msg);
-          throw new RpcServerException(msg);
-        }
-        Message prototype = service.getRequestPrototype(methodDescriptor);
-        Message param = prototype.newBuilderForType()
-            .mergeFrom(rpcRequest.getRequest()).build();
-        Message result;
-        try {
-          long startTime = System.currentTimeMillis();
-          server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
-          result = service.callBlockingMethod(methodDescriptor, null, param);
-          int processingTime = (int) (System.currentTimeMillis() - startTime);
-          int qTime = (int) (startTime - receiveTime);
-          if (LOG.isDebugEnabled()) {
-            LOG.info("Served: " + methodName + " queueTime= " + qTime +
-                      " procesingTime= " + processingTime);
-          }
-          server.rpcMetrics.addRpcQueueTime(qTime);
-          server.rpcMetrics.addRpcProcessingTime(processingTime);
-          server.rpcDetailedMetrics.addProcessingTime(methodName,
-              processingTime);
-        } catch (ServiceException e) {
-          throw (Exception) e.getCause();
-        } catch (Exception e) {
-          throw e;
-        }
-        return new RpcResponseWritable(result);
-      }
-    }
-  }
-}
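
RpcResponseWritable in the engine removed above frames a serialized response as a 4-byte length followed by the raw message bytes (requests use a varint-delimited variant). A self-contained sketch of the length-prefixed framing, with an arbitrary byte[] standing in for Message.toByteArray():

    import java.io.*;

    public class LengthPrefixedFrames {
      static void writeFrame(DataOutput out, byte[] payload) throws IOException {
        out.writeInt(payload.length);   // 4-byte big-endian length prefix
        out.write(payload);
      }

      static byte[] readFrame(DataInput in) throws IOException {
        byte[] payload = new byte[in.readInt()];
        in.readFully(payload);          // read exactly length bytes
        return payload;
      }

      public static void main(String[] args) throws IOException {
        ByteArrayOutputStream buf = new ByteArrayOutputStream();
        writeFrame(new DataOutputStream(buf), "response".getBytes("UTF-8"));
        byte[] back = readFrame(new DataInputStream(
            new ByteArrayInputStream(buf.toByteArray())));
        System.out.println(new String(back, "UTF-8")); // response
      }
    }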

+ 0 - 39
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolInfo.java

@@ -1,39 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.lang.annotation.Retention;
-import java.lang.annotation.RetentionPolicy;
-
-
-/**
- * The protocol name that is used when a client and server connect.
- * By default the class name of the protocol interface is the protocol name.
- * 
- * Why override the default name (i.e. the class name)?
- * One use case overriding the default name (i.e. the class name) is when
- * there are multiple implementations of the same protocol, each with say a
- *  different version/serialization.
- * In Hadoop this is used to allow multiple server and client adapters
- * for different versions of the same protocol service.
- */
-@Retention(RetentionPolicy.RUNTIME)
-public @interface ProtocolInfo {
-  String protocolName();  // the name of the protocol (i.e. rpc service)
-  long protocolVersion() default -1; // default means not defined use old way
-}
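
The removed ProtocolInfo is an ordinary RUNTIME annotation read back reflectively. A sketch with hypothetical names showing the declare-then-getAnnotation round trip, including the fall-back to the class name that RPC.getProtocolName (removed further below) applied:

    import java.lang.annotation.Retention;
    import java.lang.annotation.RetentionPolicy;

    public class AnnotationLookupDemo {
      @Retention(RetentionPolicy.RUNTIME)
      @interface Info {                       // stand-in for ProtocolInfo
        String name();
        long version() default -1;
      }

      @Info(name = "org.example.DemoProtocol", version = 3)
      interface DemoProtocol {}

      public static void main(String[] args) {
        Info anno = DemoProtocol.class.getAnnotation(Info.class);
        // no annotation -> fall back to the Java class name
        String name = (anno == null) ? DemoProtocol.class.getName() : anno.name();
        System.out.println(name + " v" + (anno == null ? -1 : anno.version()));
      }
    }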

+ 0 - 34
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoPB.java

@@ -1,34 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
-
-/**
- * Protocol to get versions and signatures for supported protocols from the
- * server.
- * 
- * Note: This extends the protocolbuffer service based interface to
- * add annotations.
- */
-@ProtocolInfo(
-    protocolName = "org.apache.hadoop.ipc.ProtocolMetaInfoPB", 
-    protocolVersion = 1)
-public interface ProtocolMetaInfoPB extends
-    ProtocolInfoService.BlockingInterface {
-}

+ 0 - 122
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInfoServerSideTranslatorPB.java

@@ -1,122 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import org.apache.hadoop.ipc.RPC.Server.VerProtocolImpl;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolVersionsRequestProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolVersionsResponseProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolVersionProto;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * This class serves the requests for protocol versions and signatures by
- * looking them up in the server registry.
- */
-public class ProtocolMetaInfoServerSideTranslatorPB implements
-    ProtocolMetaInfoPB {
-
-  RPC.Server server;
-  
-  public ProtocolMetaInfoServerSideTranslatorPB(RPC.Server server) {
-    this.server = server;
-  }
-  
-  @Override
-  public GetProtocolVersionsResponseProto getProtocolVersions(
-      RpcController controller, GetProtocolVersionsRequestProto request)
-      throws ServiceException {
-    String protocol = request.getProtocol();
-    GetProtocolVersionsResponseProto.Builder builder = 
-        GetProtocolVersionsResponseProto.newBuilder();
-    for (RpcKind r : RpcKind.values()) {
-      long[] versions;
-      try {
-        versions = getProtocolVersionForRpcKind(r, protocol);
-      } catch (ClassNotFoundException e) {
-        throw new ServiceException(e);
-      }
-      ProtocolVersionProto.Builder b = ProtocolVersionProto.newBuilder();
-      if (versions != null) {
-        b.setRpcKind(r.toString());
-        for (long v : versions) {
-          b.addVersions(v);
-        }
-      }
-      builder.addProtocolVersions(b.build());
-    }
-    return builder.build();
-  }
-
-  @Override
-  public GetProtocolSignatureResponseProto getProtocolSignature(
-      RpcController controller, GetProtocolSignatureRequestProto request)
-      throws ServiceException {
-    GetProtocolSignatureResponseProto.Builder builder = GetProtocolSignatureResponseProto
-        .newBuilder();
-    String protocol = request.getProtocol();
-    String rpcKind = request.getRpcKind();
-    long[] versions;
-    try {
-      versions = getProtocolVersionForRpcKind(RpcKind.valueOf(rpcKind),
-          protocol);
-    } catch (ClassNotFoundException e1) {
-      throw new ServiceException(e1);
-    }
-    if (versions == null) {
-      return builder.build();
-    }
-    for (long v : versions) {
-      ProtocolSignatureProto.Builder sigBuilder = ProtocolSignatureProto
-          .newBuilder();
-      sigBuilder.setVersion(v);
-      try {
-        ProtocolSignature signature = ProtocolSignature.getProtocolSignature(
-            protocol, v);
-        for (int m : signature.getMethods()) {
-          sigBuilder.addMethods(m);
-        }
-      } catch (ClassNotFoundException e) {
-        throw new ServiceException(e);
-      }
-      builder.addProtocolSignature(sigBuilder.build());
-    }
-    return builder.build();
-  }
-  
-  private long[] getProtocolVersionForRpcKind(RpcKind rpcKind,
-      String protocol) throws ClassNotFoundException {
-    Class<?> protocolClass = Class.forName(protocol);
-    String protocolName = RPC.getProtocolName(protocolClass);
-    VerProtocolImpl[] vers = server.getSupportedProtocolVersions(rpcKind,
-        protocolName);
-    if (vers == null) {
-      return null;
-    }
-    long [] versions = new long[vers.length];
-    for (int i=0; i<versions.length; i++) {
-      versions[i] = vers[i].version;
-    }
-    return versions;
-  }
-}
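
The translator removed above answers version queries by looping over every RpcKind and emitting one entry per kind, empty when nothing is registered. A rough sketch of that shape, with plain collections standing in for the protobuf builders and made-up version numbers:

    import java.util.EnumMap;
    import java.util.Map;

    public class VersionsPerKindDemo {
      // mirrors the constants of RpcPayloadHeader.RpcKind
      enum RpcKind { RPC_BUILTIN, RPC_WRITABLE, RPC_PROTOCOL_BUFFER }

      public static void main(String[] args) {
        Map<RpcKind, long[]> supported = new EnumMap<RpcKind, long[]>(RpcKind.class);
        supported.put(RpcKind.RPC_WRITABLE, new long[] { 1L, 2L });

        for (RpcKind kind : RpcKind.values()) { // one entry per kind, like the loop above
          long[] versions = supported.get(kind);
          System.out.println(kind + " -> "
              + (versions == null ? "none" : versions.length + " version(s)"));
        }
      }
    }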

+ 0 - 42
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolMetaInterface.java

@@ -1,42 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-
-/**
- * This interface is implemented by the client side translators and can be used
- * to obtain information about underlying protocol e.g. to check if a method is
- * supported on the server side.
- */
-@InterfaceAudience.Private
-@InterfaceStability.Stable
-public interface ProtocolMetaInterface {
-  
-  /**
-   * Checks whether the given method name is supported by the server.
-   * It is assumed that all method names are unique for a protocol.
-   * @param methodName The name of the method
-   * @return true if method is supported, otherwise false.
-   * @throws IOException
-   */
-  public boolean isMethodSupported(String methodName) throws IOException;
-}
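
ProtocolMetaInterface, removed above, gave client-side translators a probe for server support. A sketch of the intended call pattern; NewApiTranslator and both rename methods are hypothetical stand-ins:

    import java.io.IOException;

    public class MethodProbeDemo {
      interface NewApiTranslator {
        boolean isMethodSupported(String methodName) throws IOException;
        void fancyRename() throws IOException;
        void plainRename() throws IOException;
      }

      static void renameBestEffort(NewApiTranslator t) throws IOException {
        if (t.isMethodSupported("fancyRename")) {
          t.fancyRename();   // server is new enough for the richer call
        } else {
          t.plainRename();   // degrade gracefully against an old server
        }
      }
    }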

+ 10 - 2
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolProxy.java

@@ -57,11 +57,19 @@ public class ProtocolProxy<T> {
   
   private void fetchServerMethods(Method method) throws IOException {
     long clientVersion;
-    clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
+    try {
+      Field versionField = method.getDeclaringClass().getField("versionID");
+      versionField.setAccessible(true);
+      clientVersion = versionField.getLong(method.getDeclaringClass());
+    } catch (NoSuchFieldException ex) {
+      throw new RuntimeException(ex);
+    } catch (IllegalAccessException ex) {
+      throw new RuntimeException(ex);
+    }
     int clientMethodsHash = ProtocolSignature.getFingerprint(method
         .getDeclaringClass().getMethods());
     ProtocolSignature serverInfo = ((VersionedProtocol) proxy)
-        .getProtocolSignature(RPC.getProtocolName(protocol), clientVersion,
+        .getProtocolSignature(protocol.getName(), clientVersion,
             clientMethodsHash);
     long serverVersion = serverInfo.getVersion();
     if (serverVersion != clientVersion) {
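
The restored fetchServerMethods recovers the client version by reading the protocol interface's public static versionID field reflectively, exactly as in the + lines above. A minimal self-contained sketch of that lookup (DemoProtocol is hypothetical):

    import java.lang.reflect.Field;

    public class VersionIdDemo {
      interface DemoProtocol {
        long versionID = 42L;  // interface fields are implicitly public static final
      }

      public static void main(String[] args) throws Exception {
        Field f = DemoProtocol.class.getField("versionID");
        long clientVersion = f.getLong(null); // static field: no instance needed
        System.out.println(clientVersion);    // 42
      }
    }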

+ 4 - 17
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolSignature.java

@@ -29,8 +29,6 @@ import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableFactories;
 import org.apache.hadoop.io.WritableFactory;
 
-import com.google.common.annotations.VisibleForTesting;
-
 public class ProtocolSignature implements Writable {
   static {               // register a ctor
     WritableFactories.setFactory
@@ -166,15 +164,10 @@ public class ProtocolSignature implements Writable {
   /**
    * A cache that maps a protocol's name to its signature & finger print
    */
-  private final static HashMap<String, ProtocolSigFingerprint> 
+  final private static HashMap<String, ProtocolSigFingerprint> 
      PROTOCOL_FINGERPRINT_CACHE = 
        new HashMap<String, ProtocolSigFingerprint>();
   
-  @VisibleForTesting
-  public static void resetCache() {
-    PROTOCOL_FINGERPRINT_CACHE.clear();
-  }
-  
   /**
    * Return a protocol's signature and finger print from cache
    * 
@@ -183,8 +176,8 @@ public class ProtocolSignature implements Writable {
    * @return its signature and finger print
    */
   private static ProtocolSigFingerprint getSigFingerprint(
-      Class <?> protocol, long serverVersion) {
-    String protocolName = RPC.getProtocolName(protocol);
+      Class <? extends VersionedProtocol> protocol, long serverVersion) {
+    String protocolName = protocol.getName();
     synchronized (PROTOCOL_FINGERPRINT_CACHE) {
       ProtocolSigFingerprint sig = PROTOCOL_FINGERPRINT_CACHE.get(protocolName);
       if (sig == null) {
@@ -206,7 +199,7 @@ public class ProtocolSignature implements Writable {
    * @param protocol protocol
    * @return the server's protocol signature
    */
-  public static ProtocolSignature getProtocolSignature(
+  static ProtocolSignature getProtocolSignature(
       int clientMethodsHashCode,
       long serverVersion,
       Class<? extends VersionedProtocol> protocol) {
@@ -221,12 +214,6 @@ public class ProtocolSignature implements Writable {
     return sig.signature;
   }
   
-  public static ProtocolSignature getProtocolSignature(String protocolName,
-      long version) throws ClassNotFoundException {
-    Class<?> protocol = Class.forName(protocolName);
-    return getSigFingerprint(protocol, version).signature;
-  }
-  
   /**
    * Get a server protocol's signature
    *
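
getFingerprint, used above, reduces a protocol's whole method set to a single int so client and server can cheaply detect signature drift. One way such a fingerprint can be computed is sketched below; Hadoop's exact hashing may differ, so treat this as illustrative only:

    import java.lang.reflect.Method;
    import java.util.Arrays;

    public class FingerprintDemo {
      static int fingerprint(Method[] methods) {
        int[] hashes = new int[methods.length];
        for (int i = 0; i < methods.length; i++) {
          // hash the name plus parameter types of each method
          hashes[i] = (methods[i].getName()
              + Arrays.toString(methods[i].getParameterTypes())).hashCode();
        }
        Arrays.sort(hashes);         // make the result declaration-order independent
        return Arrays.hashCode(hashes);
      }

      public static void main(String[] args) {
        System.out.println(fingerprint(Runnable.class.getMethods()));
      }
    }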

+ 0 - 35
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/ProtocolTranslator.java

@@ -1,35 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import org.apache.hadoop.classification.InterfaceAudience;
-
-/**
- * An interface implemented by client-side protocol translators to get the
- * underlying proxy object the translator is operating on.
- */
-@InterfaceAudience.Private
-public interface ProtocolTranslator {
-  
-  /**
-   * Return the proxy object underlying this protocol translator.
-   * @return the proxy object underlying this protocol translator.
-   */
-  public Object getUnderlyingProxyObject();
-
-}
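
ProtocolTranslator, removed above, let callers peel a translator to reach the RPC proxy underneath. A sketch of that unwrap pattern with a stand-in interface:

    public class UnwrapProxyDemo {
      interface Translator {                  // stand-in for ProtocolTranslator
        Object getUnderlyingProxyObject();
      }

      static Object unwrap(Object o) {
        while (o instanceof Translator) {     // translators may be stacked
          o = ((Translator) o).getUnderlyingProxyObject();
        }
        return o;
      }

      public static void main(String[] args) {
        final Object proxy = new Object();
        Translator t = new Translator() {
          public Object getUnderlyingProxyObject() { return proxy; }
        };
        System.out.println(unwrap(t) == proxy); // true
      }
    }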

+ 28 - 342
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RPC.java

@@ -18,8 +18,6 @@
 
 package org.apache.hadoop.ipc;
 
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationHandler;
 import java.lang.reflect.Proxy;
 import java.lang.reflect.Method;
 
@@ -28,10 +26,6 @@ import java.net.InetSocketAddress;
 import java.net.NoRouteToHostException;
 import java.net.SocketTimeoutException;
 import java.io.*;
-import java.io.Closeable;
-import java.util.ArrayList;
-import java.util.Arrays;
-import java.util.List;
 import java.util.Map;
 import java.util.HashMap;
 
@@ -39,11 +33,7 @@ import javax.net.SocketFactory;
 
 import org.apache.commons.logging.*;
 
-import org.apache.hadoop.HadoopIllegalArgumentException;
 import org.apache.hadoop.io.*;
-import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolInfoService;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.UserGroupInformation;
@@ -52,8 +42,6 @@ import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.conf.*;
 import org.apache.hadoop.util.ReflectionUtils;
 
-import com.google.protobuf.BlockingService;
-
 /** A simple RPC mechanism.
  *
  * A <i>protocol</i> is a Java interface.  All parameters and return types must
@@ -73,100 +61,17 @@ import com.google.protobuf.BlockingService;
  * the protocol instance is transmitted.
  */
 public class RPC {
-  
-  interface RpcInvoker {   
-    /**
-     * Process a client call on the server side
-     * @param server the server within whose context this rpc call is made
-     * @param protocol - the protocol name (the class of the client proxy
-     *      used to make calls to the rpc server.
-     * @param rpcRequest  - deserialized
-     * @param receiveTime time at which the call received (for metrics)
-     * @return the call's return
-     * @throws IOException
-     **/
-    public Writable call(Server server, String protocol,
-        Writable rpcRequest, long receiveTime) throws Exception ;
-  }
-  
   static final Log LOG = LogFactory.getLog(RPC.class);
-  
-  /**
-   * Get all superInterfaces that extend VersionedProtocol
-   * @param childInterfaces
-   * @return the super interfaces that extend VersionedProtocol
-   */
-  static Class<?>[] getSuperInterfaces(Class<?>[] childInterfaces) {
-    List<Class<?>> allInterfaces = new ArrayList<Class<?>>();
-
-    for (Class<?> childInterface : childInterfaces) {
-      if (VersionedProtocol.class.isAssignableFrom(childInterface)) {
-          allInterfaces.add(childInterface);
-          allInterfaces.addAll(
-              Arrays.asList(
-                  getSuperInterfaces(childInterface.getInterfaces())));
-      } else {
-        LOG.warn("Interface " + childInterface +
-              " ignored because it does not extend VersionedProtocol");
-      }
-    }
-    return allInterfaces.toArray(new Class[allInterfaces.size()]);
-  }
-  
-  /**
-   * Get all interfaces that the given protocol implements or extends
-   * which are assignable from VersionedProtocol.
-   */
-  static Class<?>[] getProtocolInterfaces(Class<?> protocol) {
-    Class<?>[] interfaces  = protocol.getInterfaces();
-    return getSuperInterfaces(interfaces);
-  }
-  
-  /**
-   * Get the protocol name.
-   *  If the protocol class has a ProtocolAnnotation, then get the protocol
-   *  name from the annotation; otherwise the class name is the protocol name.
-   */
-  static public String getProtocolName(Class<?> protocol) {
-    if (protocol == null) {
-      return null;
-    }
-    ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
-    return  (anno == null) ? protocol.getName() : anno.protocolName();
-  }
-  
-  /**
-   * Get the protocol version from protocol class.
-   * If the protocol class has a ProtocolAnnotation, then get the protocol
-   * name from the annotation; otherwise the class name is the protocol name.
-   */
-  static public long getProtocolVersion(Class<?> protocol) {
-    if (protocol == null) {
-      throw new IllegalArgumentException("Null protocol");
-    }
-    long version;
-    ProtocolInfo anno = protocol.getAnnotation(ProtocolInfo.class);
-    if (anno != null) {
-      version = anno.protocolVersion();
-      if (version != -1)
-        return version;
-    }
-    try {
-      Field versionField = protocol.getField("versionID");
-      versionField.setAccessible(true);
-      return versionField.getLong(protocol);
-    } catch (NoSuchFieldException ex) {
-      throw new RuntimeException(ex);
-    } catch (IllegalAccessException ex) {
-      throw new RuntimeException(ex);
-    }
-  }
 
   private RPC() {}                                  // no public ctor
 
   // cache of RpcEngines by protocol
-  private static final Map<Class<?>,RpcEngine> PROTOCOL_ENGINES
-    = new HashMap<Class<?>,RpcEngine>();
+  private static final Map<Class,RpcEngine> PROTOCOL_ENGINES
+    = new HashMap<Class,RpcEngine>();
+
+  // track what RpcEngine is used by a proxy class, for stopProxy()
+  private static final Map<Class,RpcEngine> PROXY_ENGINES
+    = new HashMap<Class,RpcEngine>();
 
   private static final String ENGINE_PROP = "rpc.engine";
 
@@ -177,23 +82,32 @@ public class RPC {
    * @param engine the RpcEngine impl
    */
   public static void setProtocolEngine(Configuration conf,
-                                Class<?> protocol, Class<?> engine) {
+                                Class protocol, Class engine) {
     conf.setClass(ENGINE_PROP+"."+protocol.getName(), engine, RpcEngine.class);
   }
 
   // return the RpcEngine configured to handle a protocol
-  static synchronized RpcEngine getProtocolEngine(Class<?> protocol,
-      Configuration conf) {
+  private static synchronized RpcEngine getProtocolEngine(Class protocol,
+                                                          Configuration conf) {
     RpcEngine engine = PROTOCOL_ENGINES.get(protocol);
     if (engine == null) {
       Class<?> impl = conf.getClass(ENGINE_PROP+"."+protocol.getName(),
                                     WritableRpcEngine.class);
       engine = (RpcEngine)ReflectionUtils.newInstance(impl, conf);
+      if (protocol.isInterface())
+        PROXY_ENGINES.put(Proxy.getProxyClass(protocol.getClassLoader(),
+                                              protocol),
+                          engine);
       PROTOCOL_ENGINES.put(protocol, engine);
     }
     return engine;
   }
 
+  // return the RpcEngine that handles a proxy object
+  private static synchronized RpcEngine getProxyEngine(Object proxy) {
+    return PROXY_ENGINES.get(proxy.getClass());
+  }
+
   /**
    * A version mismatch for the RPC protocol.
    */
@@ -527,31 +441,7 @@ public class RPC {
 
      return getProtocolProxy(protocol, clientVersion, addr, conf).getProxy();
    }
-  
-  /**
-   * Returns the server address for a given proxy.
-   */
-  public static InetSocketAddress getServerAddress(Object proxy) {
-    return getConnectionIdForProxy(proxy).getAddress();
-  }
 
-  /**
-   * Return the connection ID of the given object. If the provided object is in
-   * fact a protocol translator, we'll get the connection ID of the underlying
-   * proxy object.
-   * 
-   * @param proxy the proxy object to get the connection ID of.
-   * @return the connection ID for the provided proxy object.
-   */
-  public static ConnectionId getConnectionIdForProxy(Object proxy) {
-    if (proxy instanceof ProtocolTranslator) {
-      proxy = ((ProtocolTranslator)proxy).getUnderlyingProxyObject();
-    }
-    RpcInvocationHandler inv = (RpcInvocationHandler) Proxy
-        .getInvocationHandler(proxy);
-    return inv.getConnectionId();
-  }
-   
   /**
    * Get a protocol proxy that contains a proxy connection to a remote server
    * and a set of methods that are supported by the server
@@ -573,44 +463,14 @@ public class RPC {
   }
 
   /**
-   * Stop the proxy. Proxy must either implement {@link Closeable} or must have
-   * associated {@link RpcInvocationHandler}.
-   * 
-   * @param proxy
-   *          the RPC proxy object to be stopped
-   * @throws HadoopIllegalArgumentException
-   *           if the proxy does not implement {@link Closeable} interface or
-   *           does not have closeable {@link InvocationHandler}
+   * Stop this proxy and release its invoker's resource
+   * @param proxy the proxy to be stopped
    */
   public static void stopProxy(Object proxy) {
-    if (proxy == null) {
-      throw new HadoopIllegalArgumentException(
-          "Cannot close proxy since it is null");
-    }
-    try {
-      if (proxy instanceof Closeable) {
-        ((Closeable) proxy).close();
-        return;
-      } else {
-        InvocationHandler handler = Proxy.getInvocationHandler(proxy);
-        if (handler instanceof Closeable) {
-          ((Closeable) handler).close();
-          return;
-        }
-      }
-    } catch (IOException e) {
-      LOG.error("Closing proxy or invocation handler caused exception", e);
-    } catch (IllegalArgumentException e) {
-      LOG.error("RPC.stopProxy called on non proxy.", e);
+    RpcEngine rpcEngine;
+    if (proxy!=null && (rpcEngine = getProxyEngine(proxy)) != null) {
+      rpcEngine.stopProxy(proxy);
     }
-    
-    // If you see this error on a mock object in a unit test you're
-    // developing, make sure to use MockitoUtil.mockProtocol() to
-    // create your mock.
-    throw new HadoopIllegalArgumentException(
-        "Cannot close proxy - is not Closeable or "
-            + "does not provide closeable invocation handler "
-            + proxy.getClass());
   }
 
   /** 
@@ -658,7 +518,7 @@ public class RPC {
   }
 
   /** Construct a server for a protocol implementation instance. */
-  public static Server getServer(Class<?> protocol,
+  public static Server getServer(Class protocol,
                                  Object instance, String bindAddress,
                                  int port, Configuration conf) 
     throws IOException {
@@ -669,7 +529,7 @@ public class RPC {
    * @deprecated secretManager should be passed.
    */
   @Deprecated
-  public static Server getServer(Class<?> protocol,
+  public static Server getServer(Class protocol,
                                  Object instance, String bindAddress, int port,
                                  int numHandlers,
                                  boolean verbose, Configuration conf) 
@@ -693,10 +553,8 @@ public class RPC {
   }
 
   /** Construct a server for a protocol implementation instance. */
-
-  public static <PROTO extends VersionedProtocol, IMPL extends PROTO> 
-        Server getServer(Class<PROTO> protocol,
-                                 IMPL instance, String bindAddress, int port,
+  public static Server getServer(Class<?> protocol,
+                                 Object instance, String bindAddress, int port,
                                 int numHandlers, int numReaders, int queueSizePerHandler,
                                 boolean verbose, Configuration conf,
                                 SecretManager<? extends TokenIdentifier> secretManager) 
@@ -709,147 +567,6 @@ public class RPC {
 
  /** An RPC Server. */
  public abstract static class Server extends org.apache.hadoop.ipc.Server {
-   boolean verbose;
-   static String classNameBase(String className) {
-      String[] names = className.split("\\.", -1);
-      if (names == null || names.length == 0) {
-        return className;
-      }
-      return names[names.length-1];
-    }
-   
-   /**
-    * Store a map of protocol and version to its implementation
-    */
-   /**
-    *  The key in Map
-    */
-   static class ProtoNameVer {
-     final String protocol;
-     final long   version;
-     ProtoNameVer(String protocol, long ver) {
-       this.protocol = protocol;
-       this.version = ver;
-     }
-     @Override
-     public boolean equals(Object o) {
-       if (o == null) 
-         return false;
-       if (this == o) 
-         return true;
-       if (! (o instanceof ProtoNameVer))
-         return false;
-       ProtoNameVer pv = (ProtoNameVer) o;
-       return ((pv.protocol.equals(this.protocol)) && 
-           (pv.version == this.version));     
-     }
-     @Override
-     public int hashCode() {
-       return protocol.hashCode() * 37 + (int) version;    
-     }
-   }
-   
-   /**
-    * The value in map
-    */
-   static class ProtoClassProtoImpl {
-     final Class<?> protocolClass;
-     final Object protocolImpl; 
-     ProtoClassProtoImpl(Class<?> protocolClass, Object protocolImpl) {
-       this.protocolClass = protocolClass;
-       this.protocolImpl = protocolImpl;
-     }
-   }
-
-   ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>> protocolImplMapArray = 
-       new ArrayList<Map<ProtoNameVer, ProtoClassProtoImpl>>(RpcKind.MAX_INDEX);
-   
-   Map<ProtoNameVer, ProtoClassProtoImpl> getProtocolImplMap(RpcKind rpcKind) {
-     if (protocolImplMapArray.size() == 0) {// initialize for all rpc kinds
-       for (int i=0; i <= RpcKind.MAX_INDEX; ++i) {
-         protocolImplMapArray.add(
-             new HashMap<ProtoNameVer, ProtoClassProtoImpl>(10));
-       }
-     }
-     return protocolImplMapArray.get(rpcKind.ordinal());   
-   }
-   
-   // Register  protocol and its impl for rpc calls
-   void registerProtocolAndImpl(RpcKind rpcKind, Class<?> protocolClass, 
-       Object protocolImpl) throws IOException {
-     String protocolName = RPC.getProtocolName(protocolClass);
-     long version;
-     
-
-     try {
-       version = RPC.getProtocolVersion(protocolClass);
-     } catch (Exception ex) {
-       LOG.warn("Protocol "  + protocolClass + 
-            " NOT registered as cannot get protocol version ");
-       return;
-     }
-
-
-     getProtocolImplMap(rpcKind).put(new ProtoNameVer(protocolName, version),
-         new ProtoClassProtoImpl(protocolClass, protocolImpl)); 
-     LOG.debug("RpcKind = " + rpcKind + " Protocol Name = " + protocolName +  " version=" + version +
-         " ProtocolImpl=" + protocolImpl.getClass().getName() + 
-         " protocolClass=" + protocolClass.getName());
-   }
-   
-   static class VerProtocolImpl {
-     final long version;
-     final ProtoClassProtoImpl protocolTarget;
-     VerProtocolImpl(long ver, ProtoClassProtoImpl protocolTarget) {
-       this.version = ver;
-       this.protocolTarget = protocolTarget;
-     }
-   }
-   
-   
-   @SuppressWarnings("unused") // will be useful later.
-   VerProtocolImpl[] getSupportedProtocolVersions(RpcKind rpcKind,
-       String protocolName) {
-     VerProtocolImpl[] resultk = 
-         new  VerProtocolImpl[getProtocolImplMap(rpcKind).size()];
-     int i = 0;
-     for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv :
-                                       getProtocolImplMap(rpcKind).entrySet()) {
-       if (pv.getKey().protocol.equals(protocolName)) {
-         resultk[i++] = 
-             new VerProtocolImpl(pv.getKey().version, pv.getValue());
-       }
-     }
-     if (i == 0) {
-       return null;
-     }
-     VerProtocolImpl[] result = new VerProtocolImpl[i];
-     System.arraycopy(resultk, 0, result, 0, i);
-     return result;
-   }
-   
-   VerProtocolImpl getHighestSupportedProtocol(RpcKind rpcKind, 
-       String protocolName) {    
-     Long highestVersion = 0L;
-     ProtoClassProtoImpl highest = null;
-     if (LOG.isDebugEnabled()) {
-       LOG.debug("Size of protoMap for " + rpcKind + " ="
-           + getProtocolImplMap(rpcKind).size());
-     }
-     for (Map.Entry<ProtoNameVer, ProtoClassProtoImpl> pv : 
-           getProtocolImplMap(rpcKind).entrySet()) {
-       if (pv.getKey().protocol.equals(protocolName)) {
-         if ((highest == null) || (pv.getKey().version > highestVersion)) {
-           highest = pv.getValue();
-           highestVersion = pv.getKey().version;
-         } 
-       }
-     }
-     if (highest == null) {
-       return null;
-     }
-     return new VerProtocolImpl(highestVersion,  highest);   
-   }
   
    protected Server(String bindAddress, int port, 
                     Class<? extends Writable> paramClass, int handlerCount,
@@ -858,38 +575,7 @@ public class RPC {
                     SecretManager<? extends TokenIdentifier> secretManager) throws IOException {
      super(bindAddress, port, paramClass, handlerCount, numReaders, queueSizePerHandler,
            conf, serverName, secretManager);
-      initProtocolMetaInfo(conf);
-    }
-    
-    private void initProtocolMetaInfo(Configuration conf)
-        throws IOException {
-      RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
-          ProtobufRpcEngine.class);
-      ProtocolMetaInfoServerSideTranslatorPB xlator = 
-          new ProtocolMetaInfoServerSideTranslatorPB(this);
-      BlockingService protocolInfoBlockingService = ProtocolInfoService
-          .newReflectiveBlockingService(xlator);
-      addProtocol(RpcKind.RPC_PROTOCOL_BUFFER, ProtocolMetaInfoPB.class,
-          protocolInfoBlockingService);
-    }
-    
-    /**
-     * Add a protocol to the existing server.
-     * @param protocolClass - the protocol class
-     * @param protocolImpl - the impl of the protocol that will be called
-     * @return the server (for convenience)
-     */
-    public Server addProtocol(RpcKind rpcKind, Class<?> protocolClass,
-        Object protocolImpl) throws IOException {
-      registerProtocolAndImpl(rpcKind, protocolClass, protocolImpl);
-      return this;
-    }
-    
-    @Override
-    public Writable call(RpcKind rpcKind, String protocol,
-        Writable rpcRequest, long receiveTime) throws Exception {
-      return getRpcInvoker(rpcKind).call(this, protocol, rpcRequest,
-          receiveTime);
    }
  }
+
}

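For reference, the removed stopProxy above relied on the proxy (or its
InvocationHandler) implementing Closeable. A minimal sketch of that shutdown
idiom, assuming a hypothetical ProxyCloser helper that is not part of this
patch:

import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

public final class ProxyCloser {
  public static void close(Object proxy) throws IOException {
    if (proxy instanceof Closeable) {
      ((Closeable) proxy).close();      // proxy manages its own connection
      return;
    }
    // Throws IllegalArgumentException when the argument is not a dynamic proxy.
    InvocationHandler handler = Proxy.getInvocationHandler(proxy);
    if (handler instanceof Closeable) {
      ((Closeable) handler).close();    // handler owns the RPC client
    }
  }
}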
+ 0 - 193
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcClientUtil.java

@@ -1,193 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.io.IOException;
-import java.lang.reflect.Method;
-import java.lang.reflect.Proxy;
-import java.net.InetSocketAddress;
-import java.util.List;
-import java.util.Map;
-import java.util.TreeMap;
-import java.util.concurrent.ConcurrentHashMap;
-
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureRequestProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.GetProtocolSignatureResponseProto;
-import org.apache.hadoop.ipc.protobuf.ProtocolInfoProtos.ProtocolSignatureProto;
-import org.apache.hadoop.net.NetUtils;
-
-import com.google.protobuf.RpcController;
-import com.google.protobuf.ServiceException;
-
-/**
- * This class maintains a cache of protocol versions and corresponding protocol
- * signatures, keyed by server address, protocol and rpc kind.
- * The cache is lazily populated. 
- */
-public class RpcClientUtil {
-  private static RpcController NULL_CONTROLLER = null;
-  private static final int PRIME = 16777619;
-  
-  private static class ProtoSigCacheKey {
-    private InetSocketAddress serverAddress;
-    private String protocol;
-    private String rpcKind;
-    
-    ProtoSigCacheKey(InetSocketAddress addr, String p, String rk) {
-      this.serverAddress = addr;
-      this.protocol = p;
-      this.rpcKind = rk;
-    }
-    
-    @Override //Object
-    public int hashCode() {
-      int result = 1;
-      result = PRIME * result
-          + ((serverAddress == null) ? 0 : serverAddress.hashCode());
-      result = PRIME * result + ((protocol == null) ? 0 : protocol.hashCode());
-      result = PRIME * result + ((rpcKind == null) ? 0 : rpcKind.hashCode());
-      return result;
-    }
-    
-    @Override //Object
-    public boolean equals(Object other) {
-      if (other == this) {
-        return true;
-      }
-      if (other instanceof ProtoSigCacheKey) {
-        ProtoSigCacheKey otherKey = (ProtoSigCacheKey) other;
-        return (serverAddress.equals(otherKey.serverAddress) &&
-            protocol.equals(otherKey.protocol) &&
-            rpcKind.equals(otherKey.rpcKind));
-      }
-      return false;
-    }
-  }
-  
-  private static ConcurrentHashMap<ProtoSigCacheKey, Map<Long, ProtocolSignature>> 
-  signatureMap = new ConcurrentHashMap<ProtoSigCacheKey, Map<Long, ProtocolSignature>>();
-
-  private static void putVersionSignatureMap(InetSocketAddress addr,
-      String protocol, String rpcKind, Map<Long, ProtocolSignature> map) {
-    signatureMap.put(new ProtoSigCacheKey(addr, protocol, rpcKind), map);
-  }
-  
-  private static Map<Long, ProtocolSignature> getVersionSignatureMap(
-      InetSocketAddress addr, String protocol, String rpcKind) {
-    return signatureMap.get(new ProtoSigCacheKey(addr, protocol, rpcKind));
-  }
-
-  /**
-   * Returns whether the given method is supported or not.
-   * The protocol signatures are fetched and cached. The connection id for the
-   * proxy provided is re-used.
-   * @param rpcProxy Proxy which provides an existing connection id.
-   * @param protocol Protocol for which the method check is required.
-   * @param rpcKind The RpcKind for which the method check is required.
-   * @param version The version at the client.
-   * @param methodName Name of the method.
-   * @return true if the method is supported, false otherwise.
-   * @throws IOException
-   */
-  public static boolean isMethodSupported(Object rpcProxy, Class<?> protocol,
-      RpcKind rpcKind, long version, String methodName) throws IOException {
-    InetSocketAddress serverAddress = RPC.getServerAddress(rpcProxy);
-    Map<Long, ProtocolSignature> versionMap = getVersionSignatureMap(
-        serverAddress, protocol.getName(), rpcKind.toString());
-
-    if (versionMap == null) {
-      Configuration conf = new Configuration();
-      RPC.setProtocolEngine(conf, ProtocolMetaInfoPB.class,
-          ProtobufRpcEngine.class);
-      ProtocolMetaInfoPB protocolInfoProxy = getProtocolMetaInfoProxy(rpcProxy,
-          conf);
-      GetProtocolSignatureRequestProto.Builder builder = 
-          GetProtocolSignatureRequestProto.newBuilder();
-      builder.setProtocol(protocol.getName());
-      builder.setRpcKind(rpcKind.toString());
-      GetProtocolSignatureResponseProto resp;
-      try {
-        resp = protocolInfoProxy.getProtocolSignature(NULL_CONTROLLER,
-            builder.build());
-      } catch (ServiceException se) {
-        throw ProtobufHelper.getRemoteException(se);
-      }
-      versionMap = convertProtocolSignatureProtos(resp
-          .getProtocolSignatureList());
-      putVersionSignatureMap(serverAddress, protocol.getName(),
-          rpcKind.toString(), versionMap);
-    }
-    // Assuming unique method names.
-    Method desiredMethod;
-    Method[] allMethods = protocol.getMethods();
-    desiredMethod = null;
-    for (Method m : allMethods) {
-      if (m.getName().equals(methodName)) {
-        desiredMethod = m;
-        break;
-      }
-    }
-    if (desiredMethod == null) {
-      return false;
-    }
-    int methodHash = ProtocolSignature.getFingerprint(desiredMethod);
-    return methodExists(methodHash, version, versionMap);
-  }
-  
-  private static Map<Long, ProtocolSignature> 
-  convertProtocolSignatureProtos(List<ProtocolSignatureProto> protoList) {
-    Map<Long, ProtocolSignature> map = new TreeMap<Long, ProtocolSignature>();
-    for (ProtocolSignatureProto p : protoList) {
-      int [] methods = new int[p.getMethodsList().size()];
-      int index=0;
-      for (int m : p.getMethodsList()) {
-        methods[index++] = m;
-      }
-      map.put(p.getVersion(), new ProtocolSignature(p.getVersion(), methods));
-    }
-    return map;
-  }
-
-  private static boolean methodExists(int methodHash, long version,
-      Map<Long, ProtocolSignature> versionMap) {
-    ProtocolSignature sig = versionMap.get(version);
-    if (sig != null) {
-      for (int m : sig.getMethods()) {
-        if (m == methodHash) {
-          return true;
-        }
-      }
-    }
-    return false;
-  }
-  
-  // The proxy returned re-uses the underlying connection. This is a special 
-  // mechanism for ProtocolMetaInfoPB.
-  // Don't do this for any other protocol, it might cause a security hole.
-  private static ProtocolMetaInfoPB getProtocolMetaInfoProxy(Object proxy,
-      Configuration conf) throws IOException {
-    RpcInvocationHandler inv = (RpcInvocationHandler) Proxy
-        .getInvocationHandler(proxy);
-    return RPC
-        .getProtocolEngine(ProtocolMetaInfoPB.class, conf)
-        .getProtocolMetaInfoProxy(inv.getConnectionId(), conf,
-            NetUtils.getDefaultSocketFactory(conf)).getProxy();
-  }
-}

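The deleted RpcClientUtil above populates its cache lazily, keyed by server
address, protocol and rpc kind. A minimal sketch of that caching shape,
assuming Java 16+ records; SignatureCache and Key are hypothetical names:

import java.util.concurrent.ConcurrentHashMap;
import java.util.function.Function;

final class SignatureCache<V> {
  // Composite key: server address + protocol + rpc kind, as in the class above.
  record Key(String address, String protocol, String rpcKind) {}

  private final ConcurrentHashMap<Key, V> cache = new ConcurrentHashMap<>();

  V getOrLoad(Key key, Function<Key, V> loader) {
    // Populate on first miss; later lookups for the same key hit the cache.
    return cache.computeIfAbsent(key, loader);
  }
}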
+ 3 - 13
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcEngine.java

@@ -26,7 +26,6 @@ import javax.net.SocketFactory;
 
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.ipc.Client.ConnectionId;
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.TokenIdentifier;
@@ -42,6 +41,9 @@ public interface RpcEngine {
                   UserGroupInformation ticket, Configuration conf,
                   SocketFactory factory, int rpcTimeout) throws IOException;
 
+  /** Stop this proxy. */
+  void stopProxy(Object proxy);
+
   /** Expert: Make multiple, parallel calls to a set of servers. */
   Object[] call(Method method, Object[][] params, InetSocketAddress[] addrs,
                 UserGroupInformation ticket, Configuration conf)
@@ -55,16 +57,4 @@ public interface RpcEngine {
                        SecretManager<? extends TokenIdentifier> secretManager
                        ) throws IOException;
 
-  /**
-   * Returns a proxy for ProtocolMetaInfoPB, which uses the given connection
-   * id.
-   * @param connId, ConnectionId to be used for the proxy.
-   * @param conf, Configuration.
-   * @param factory, Socket factory.
-   * @return Proxy object.
-   * @throws IOException
-   */
-  ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
-      ConnectionId connId, Configuration conf, SocketFactory factory)
-      throws IOException;
 }

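The stopProxy method added back to RpcEngine pairs with RPC.getProxy on the
client side. A hedged usage sketch; PingProtocol is hypothetical and the exact
getProxy overload varies across branches:

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.ipc.VersionedProtocol;

interface PingProtocol extends VersionedProtocol {
  long versionID = 1L;
  void ping() throws IOException;
}

public class PingClient {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    PingProtocol proxy = (PingProtocol) RPC.getProxy(PingProtocol.class,
        PingProtocol.versionID, new InetSocketAddress("localhost", 8020), conf);
    try {
      proxy.ping();
    } finally {
      RPC.stopProxy(proxy);  // lets the owning engine release the connection
    }
  }
}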
+ 0 - 36
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcInvocationHandler.java

@@ -1,36 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.ipc;
-
-import java.io.Closeable;
-import java.lang.reflect.InvocationHandler;
-
-import org.apache.hadoop.ipc.Client.ConnectionId;
-
-/**
- * This interface must be implemented by all InvocationHandler
- * implementations.
- */
-public interface RpcInvocationHandler extends InvocationHandler, Closeable {
-  
-  /**
-   * Returns the connection id associated with the InvocationHandler instance.
-   * @return ConnectionId
-   */
-  ConnectionId getConnectionId();
-}

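The interface deleted above captured a simple pattern: an InvocationHandler
that is also Closeable and can report the connection it multiplexes. A sketch
of that pattern with a hypothetical Invoker class; the connection bookkeeping
is elided:

import java.io.Closeable;
import java.io.IOException;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;

final class Invoker implements InvocationHandler, Closeable {
  private volatile boolean closed;

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) {
    if (closed) {
      throw new IllegalStateException("proxy already stopped");
    }
    // ...marshal method and args, send them over the shared connection...
    return null;
  }

  @Override
  public void close() throws IOException {
    closed = true;  // release the shared client connection here
  }
}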
+ 0 - 118
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcPayloadHeader.java

@@ -1,118 +0,0 @@
-package org.apache.hadoop.ipc;
-
-import java.io.DataInput;
-import java.io.DataOutput;
-import java.io.IOException;
-
-import org.apache.hadoop.io.Writable;
-
-/**
- * This is the rpc payload header. It is sent with every rpc call
- * <pre>
- * The format of RPC call is as follows:
- * +---------------------------------------------------+
- * |  Rpc length in bytes (header + payload length)    |
- * +---------------------------------------------------+
- * |      Rpc Header       |       Rpc Payload         |
- * +---------------------------------------------------+
- * 
- * The format of Rpc Header is:
- * +----------------------------------+
- * |  RpcKind (1 bytes)               |      
- * +----------------------------------+
- * |  RpcPayloadOperation (1 bytes)   |      
- * +----------------------------------+
- * |  Call ID (4 bytes)               |      
- * +----------------------------------+
- * 
- * {@link RpcKind} determines the type of serialization used for Rpc Payload.
- * </pre>
- * <p>
- * <b>Note this header does NOT have its own version number, 
- * it used the version number from the connection header. </b>
- */
-public class RpcPayloadHeader implements Writable {
-  public enum RpcPayloadOperation {
-    RPC_FINAL_PAYLOAD ((short)1),
-    RPC_CONTINUATION_PAYLOAD ((short)2), // not implemented yet
-    RPC_CLOSE_CONNECTION ((short)3);     // close the rpc connection
-    
-    private final short code;
-    private static final short FIRST_INDEX = RPC_FINAL_PAYLOAD.code;
-    RpcPayloadOperation(short val) {
-      this.code = val;
-    }
-    
-    public void write(DataOutput out) throws IOException {  
-      out.writeByte(code);
-    }
-    
-    static RpcPayloadOperation readFields(DataInput in) throws IOException {
-      short inValue = in.readByte();
-      return RpcPayloadOperation.values()[inValue - FIRST_INDEX];
-    }
-  }
-  
-  public enum RpcKind {
-    RPC_BUILTIN ((short) 1),         // Used for built in calls by tests
-    RPC_WRITABLE ((short) 2),        // Use WritableRpcEngine 
-    RPC_PROTOCOL_BUFFER ((short) 3); // Use ProtobufRpcEngine
-    final static short MAX_INDEX = RPC_PROTOCOL_BUFFER.value; // used for array size
-    private static final short FIRST_INDEX = RPC_BUILTIN.value;    
-    private final short value;
-
-    RpcKind(short val) {
-      this.value = val;
-    }
-    
-    public void write(DataOutput out) throws IOException {
-      out.writeByte(value);
-    }
-    
-    static RpcKind readFields(DataInput in) throws IOException {
-      short inValue = in.readByte();
-      return RpcKind.values()[inValue - FIRST_INDEX];
-    }  
-  }
-  
-  private RpcKind kind;
-  private RpcPayloadOperation operation;
-  private int callId;
-  
-  public RpcPayloadHeader() {
-    kind = RpcKind.RPC_WRITABLE;
-    operation = RpcPayloadOperation.RPC_CLOSE_CONNECTION;
-  }
-  
-  public RpcPayloadHeader(RpcKind kind, RpcPayloadOperation op, int callId) {
-    this.kind  = kind;
-    this.operation = op;
-    this.callId = callId;
-  }
-  
-  int getCallId() {
-    return callId;
-  }
-  
-  RpcKind getkind() {
-    return kind;
-  }
-  
-  RpcPayloadOperation getOperation() {
-    return operation;
-  }
-
-  @Override
-  public void write(DataOutput out) throws IOException {
-    kind.write(out);
-    operation.write(out);
-    out.writeInt(callId); 
-  }
-
-  @Override
-  public void readFields(DataInput in) throws IOException {
-    kind = RpcKind.readFields(in);
-    operation = RpcPayloadOperation.readFields(in);
-    this.callId = in.readInt();
-  }
-}

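The javadoc deleted above fixes the header at six bytes: one byte of RpcKind,
one byte of operation, four bytes of call id. A self-contained sketch that
writes those bytes, using the enum codes shown in the removed source:

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;

final class HeaderDemo {
  public static void main(String[] args) throws IOException {
    ByteArrayOutputStream buf = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(buf);
    out.writeByte(2);   // RpcKind.RPC_WRITABLE
    out.writeByte(1);   // RpcPayloadOperation.RPC_FINAL_PAYLOAD
    out.writeInt(42);   // call id
    System.out.println("header length = " + buf.size());  // prints 6
  }
}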
+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/RpcServerException.java

@@ -25,9 +25,10 @@ public class RpcServerException extends RpcException {
 
   /**
    * Constructs exception with the specified detail message.
-   * @param message detailed message.
+   * 
+   * @param messages detailed message.
    */
-  public RpcServerException(final String message) {
+  RpcServerException(final String message) {
     super(message);
   }
   
@@ -35,11 +36,12 @@ public class RpcServerException extends RpcException {
    * Constructs exception with the specified detail message and cause.
    * 
    * @param message message.
+   * @param cause that cause this exception
    * @param cause the cause (can be retried by the {@link #getCause()} method).
    *          (A <tt>null</tt> value is permitted, and indicates that the cause
    *          is nonexistent or unknown.)
    */
-  public RpcServerException(final String message, final Throwable cause) {
+  RpcServerException(final String message, final Throwable cause) {
     super(message, cause);
   }
 }

+ 102 - 293
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/Server.java

@@ -21,7 +21,6 @@ package org.apache.hadoop.ipc;
 import java.io.ByteArrayInputStream;
 import java.io.ByteArrayOutputStream;
 import java.io.DataInputStream;
-import java.io.DataOutput;
 import java.io.DataOutputStream;
 import java.io.IOException;
 import java.net.BindException;
@@ -43,9 +42,7 @@ import java.nio.channels.SocketChannel;
 import java.nio.channels.WritableByteChannel;
 import java.security.PrivilegedExceptionAction;
 import java.util.ArrayList;
-import java.util.Arrays;
 import java.util.Collections;
-import java.util.HashMap;
 import java.util.Iterator;
 import java.util.LinkedList;
 import java.util.List;
@@ -69,35 +66,28 @@ import org.apache.hadoop.io.BytesWritable;
 import org.apache.hadoop.io.IntWritable;
 import org.apache.hadoop.io.Writable;
 import org.apache.hadoop.io.WritableUtils;
-import org.apache.hadoop.ipc.RPC.RpcInvoker;
 import org.apache.hadoop.ipc.RPC.VersionMismatch;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcPayloadOperation;
 import org.apache.hadoop.ipc.metrics.RpcDetailedMetrics;
 import org.apache.hadoop.ipc.metrics.RpcMetrics;
-import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.AccessControlException;
 import org.apache.hadoop.security.SaslRpcServer;
 import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
+import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
 import org.apache.hadoop.security.SaslRpcServer.SaslDigestCallbackHandler;
 import org.apache.hadoop.security.SaslRpcServer.SaslGssCallbackHandler;
-import org.apache.hadoop.security.SaslRpcServer.SaslStatus;
-import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.UserGroupInformation.AuthenticationMethod;
+import org.apache.hadoop.security.UserGroupInformation;
+import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.authorize.AuthorizationException;
 import org.apache.hadoop.security.authorize.PolicyProvider;
-import org.apache.hadoop.security.authorize.ProxyUsers;
 import org.apache.hadoop.security.authorize.ServiceAuthorizationManager;
+import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.security.token.SecretManager.InvalidToken;
-import org.apache.hadoop.security.token.TokenIdentifier;
-import org.apache.hadoop.util.ProtoUtil;
 import org.apache.hadoop.util.ReflectionUtils;
 import org.apache.hadoop.util.StringUtils;
 
-import com.google.common.annotations.VisibleForTesting;
-
 /** An abstract IPC service.  IPC calls take a single {@link Writable} as a
  * parameter, and return a {@link Writable} as their value.  A service runs on
  * a port and is defined by a parameter class and a value class.
@@ -113,105 +103,17 @@ public abstract class Server {
    */
   public static final ByteBuffer HEADER = ByteBuffer.wrap("hrpc".getBytes());
   
-  /**
-   * Serialization type for ConnectionContext and RpcPayloadHeader
-   */
-  public enum IpcSerializationType {
-    // Add new serialization type to the end without affecting the enum order
-    PROTOBUF;
-    
-    void write(DataOutput out) throws IOException {
-      out.writeByte(this.ordinal());
-    }
-    
-    static IpcSerializationType fromByte(byte b) {
-      return IpcSerializationType.values()[b];
-    }
-  }
-  
-  /**
-   * If the user accidentally sends an HTTP GET to an IPC port, we detect this
-   * and send back a nicer response.
-   */
-  private static final ByteBuffer HTTP_GET_BYTES = ByteBuffer.wrap(
-      "GET ".getBytes());
-  
-  /**
-   * An HTTP response to send back if we detect an HTTP request to our IPC
-   * port.
-   */
-  static final String RECEIVED_HTTP_REQ_RESPONSE =
-    "HTTP/1.1 404 Not Found\r\n" +
-    "Content-type: text/plain\r\n\r\n" +
-    "It looks like you are making an HTTP request to a Hadoop IPC port. " +
-    "This is not the correct port for the web interface on this daemon.\r\n";
-  
   // 1 : Introduce ping and server does not throw away RPCs
   // 3 : Introduce the protocol into the RPC connection header
   // 4 : Introduced SASL security layer
   // 5 : Introduced use of {@link ArrayPrimitiveWritable$Internal}
   //     in ObjectWritable to efficiently transmit arrays of primitives
-  // 6 : Made RPC payload header explicit
-  // 7 : Changed Ipc Connection Header to use Protocol buffers
-  public static final byte CURRENT_VERSION = 7;
+  public static final byte CURRENT_VERSION = 5;
 
   /**
    * Initial and max size of response buffer
    */
   static int INITIAL_RESP_BUF_SIZE = 10240;
-  
-  static class RpcKindMapValue {
-    final Class<? extends Writable> rpcRequestWrapperClass;
-    final RpcInvoker rpcInvoker;
-    RpcKindMapValue (Class<? extends Writable> rpcRequestWrapperClass,
-          RpcInvoker rpcInvoker) {
-      this.rpcInvoker = rpcInvoker;
-      this.rpcRequestWrapperClass = rpcRequestWrapperClass;
-    }   
-  }
-  static Map<RpcKind, RpcKindMapValue> rpcKindMap = new
-      HashMap<RpcKind, RpcKindMapValue>(4);
-  
-  
-
-  /**
-   * Register a RPC kind and the class to deserialize the rpc request.
-   * 
-   * Called by static initializers of rpcKind Engines
-   * @param rpcKind
-   * @param rpcRequestWrapperClass - this class is used to deserialze the
-   *  the rpc request.
-   *  @param rpcInvoker - use to process the calls on SS.
-   */
-  
-  public static void registerProtocolEngine(RpcKind rpcKind, 
-          Class<? extends Writable> rpcRequestWrapperClass,
-          RpcInvoker rpcInvoker) {
-    RpcKindMapValue  old = 
-        rpcKindMap.put(rpcKind, new RpcKindMapValue(rpcRequestWrapperClass, rpcInvoker));
-    if (old != null) {
-      rpcKindMap.put(rpcKind, old);
-      throw new IllegalArgumentException("ReRegistration of rpcKind: " +
-          rpcKind);      
-    }
-    LOG.debug("rpcKind=" + rpcKind + 
-        ", rpcRequestWrapperClass=" + rpcRequestWrapperClass + 
-        ", rpcInvoker=" + rpcInvoker);
-  }
-  
-  public Class<? extends Writable> getRpcRequestWrapper(
-      RpcKind rpcKind) {
-    if (rpcRequestClass != null)
-       return rpcRequestClass;
-    RpcKindMapValue val = rpcKindMap.get(rpcKind);
-    return (val == null) ? null : val.rpcRequestWrapperClass; 
-  }
-  
-  public static RpcInvoker  getRpcInvoker(RpcKind rpcKind) {
-    RpcKindMapValue val = rpcKindMap.get(rpcKind);
-    return (val == null) ? null : val.rpcInvoker; 
-  }
-  
 
 
   public static final Log LOG = LogFactory.getLog(Server.class);
   public static final Log AUDITLOG = 
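The registration code removed in this hunk mapped each RpcKind to the Writable
wrapper able to deserialize its requests, and rejected re-registration. A
generic sketch of that idiom; KindRegistry is a hypothetical name, not part of
this patch:

import java.util.HashMap;
import java.util.Map;

final class KindRegistry<K, V> {
  private final Map<K, V> map = new HashMap<>();

  synchronized void register(K kind, V value) {
    if (map.putIfAbsent(kind, value) != null) {
      throw new IllegalArgumentException("ReRegistration of rpcKind: " + kind);
    }
  }

  synchronized V lookup(K kind) {
    return map.get(kind);  // null when the kind was never registered
  }
}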
@@ -276,7 +178,7 @@ public abstract class Server {
   private int port;                               // port we listen on
   private int handlerCount;                       // number of handler threads
   private int readThreads;                        // number of read threads
-  private Class<? extends Writable> rpcRequestClass;   // class used for deserializing the rpc request
+  private Class<? extends Writable> paramClass;   // class of call parameters
   private int maxIdleTime;                        // the maximum idle time after 
                                                   // which a client may be disconnected
   private int thresholdIdleConnections;           // the number of idle connections
@@ -337,21 +239,10 @@ public abstract class Server {
    * Returns a handle to the rpcMetrics (required in tests)
    * @return rpc metrics
    */
-  @VisibleForTesting
   public RpcMetrics getRpcMetrics() {
     return rpcMetrics;
   }
 
-  @VisibleForTesting
-  public RpcDetailedMetrics getRpcDetailedMetrics() {
-    return rpcDetailedMetrics;
-  }
-  
-  @VisibleForTesting
-  Iterable<? extends Thread> getHandlers() {
-    return Arrays.asList(handlers);
-  }
-
   /**
    * Refresh the service authorization ACL for the service handled by this server.
    */
@@ -370,33 +261,28 @@ public abstract class Server {
 
   /** A call queued for handling. */
   private static class Call {
-    private final int callId;             // the client's call id
-    private final Writable rpcRequest;    // Serialized Rpc request from client
-    private final Connection connection;  // connection to client
-    private long timestamp;               // time received when response is null
-                                          // time served when response is not null
-    private ByteBuffer rpcResponse;       // the response for this call
-    private final RpcKind rpcKind;
-
-    public Call(int id, Writable param, Connection connection) {
-      this( id,  param,  connection, RpcKind.RPC_BUILTIN );    
-    }
-    public Call(int id, Writable param, Connection connection, RpcKind kind) { 
-      this.callId = id;
-      this.rpcRequest = param;
+    private int id;                               // the client's call id
+    private Writable param;                       // the parameter passed
+    private Connection connection;                // connection to client
+    private long timestamp;     // the time received when response is null
+                                   // the time served when response is not null
+    private ByteBuffer response;                      // the response for this call
+
+    public Call(int id, Writable param, Connection connection) { 
+      this.id = id;
+      this.param = param;
       this.connection = connection;
       this.connection = connection;
       this.timestamp = System.currentTimeMillis();
-      this.rpcKind = kind;
+      this.response = null;
     }
     
     @Override
     public String toString() {
-      return rpcRequest.toString() + " from " + connection.toString();
+      return param.toString() + " from " + connection.toString();
     }
 
     public void setResponse(ByteBuffer response) {
-      this.rpcResponse = response;
+      this.response = response;
     }
   }
 
@@ -895,17 +781,17 @@ public abstract class Server {
           call = responseQueue.removeFirst();
           SocketChannel channel = call.connection.channel;
           if (LOG.isDebugEnabled()) {
-            LOG.debug(getName() + ": responding to #" + call.callId + " from " +
+            LOG.debug(getName() + ": responding to #" + call.id + " from " +
                      call.connection);
           }
           //
           // Send as much data as we can in the non-blocking fashion
           //
-          int numBytes = channelWrite(channel, call.rpcResponse);
+          int numBytes = channelWrite(channel, call.response);
           if (numBytes < 0) {
             return true;
           }
-          if (!call.rpcResponse.hasRemaining()) {
+          if (!call.response.hasRemaining()) {
             call.connection.decRpcCount();
             if (numElements == 1) {    // last call fully processes.
               done = true;             // no more data for this channel.
@@ -913,7 +799,7 @@ public abstract class Server {
               done = false;            // more calls pending to be sent.
             }
             if (LOG.isDebugEnabled()) {
-              LOG.debug(getName() + ": responding to #" + call.callId + " from " +
+              LOG.debug(getName() + ": responding to #" + call.id + " from " +
                         call.connection + " Wrote " + numBytes + " bytes.");
             }
           } else {
@@ -941,7 +827,7 @@ public abstract class Server {
               }
             }
             if (LOG.isDebugEnabled()) {
-              LOG.debug(getName() + ": responding to #" + call.callId + " from " +
+              LOG.debug(getName() + ": responding to #" + call.id + " from " +
                         call.connection + " Wrote partial " + numBytes + 
                         " bytes.");
             }
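The Responder logic in these hunks writes as much of a response as the
non-blocking channel accepts and requeues the call when bytes remain. A
minimal sketch of that pattern; PartialWriter is a hypothetical name:

import java.io.IOException;
import java.nio.ByteBuffer;
import java.nio.channels.SocketChannel;

final class PartialWriter {
  /** Returns true once the whole buffer has been flushed. */
  static boolean writeSome(SocketChannel ch, ByteBuffer response)
      throws IOException {
    ch.write(response);               // may write fewer bytes than remaining
    return !response.hasRemaining();  // false => keep the call queued
  }
}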
@@ -988,9 +874,9 @@ public abstract class Server {
 
   /** Reads calls from a connection and queues them for handling. */
   public class Connection {
-    private boolean connectionHeaderRead = false; // connection  header is read?
-    private boolean connectionContextRead = false; //if connection context that
-                                            //follows connection header is read
+    private boolean rpcHeaderRead = false; // if initial rpc header is read
+    private boolean headerRead = false;  //if the connection header that
+                                         //follows version is read.
 
     private SocketChannel channel;
     private ByteBuffer data;
@@ -1006,14 +892,14 @@ public abstract class Server {
     private int remotePort;
     private InetAddress addr;
     
-    IpcConnectionContextProto connectionContext;
-    String protocolName;
+    ConnectionHeader header = new ConnectionHeader();
+    Class<?> protocol;
     boolean useSasl;
     SaslServer saslServer;
     private AuthMethod authMethod;
     private boolean saslContextEstablished;
     private boolean skipInitialSaslHandshake;
-    private ByteBuffer connectionHeaderBuf = null;
+    private ByteBuffer rpcHeaderBuffer;
     private ByteBuffer unwrappedData;
     private ByteBuffer unwrappedDataLengthBuffer;
     
@@ -1027,7 +913,6 @@ public abstract class Server {
     private ByteArrayOutputStream authFailedResponse = new ByteArrayOutputStream();
     // Fake 'call' for SASL context setup
     private static final int SASL_CALLID = -33;
-    
     private final Call saslCall = new Call(SASL_CALLID, null, this);
     private final ByteArrayOutputStream saslResponse = new ByteArrayOutputStream();
     
@@ -1131,7 +1016,6 @@ public abstract class Server {
                 throw new AccessControlException(
                     "Server is not configured to do DIGEST authentication.");
               }
-              secretManager.checkAvailableForRead();
               saslServer = Sasl.createSaslServer(AuthMethod.DIGEST
                   .getMechanismName(), null, SaslRpcServer.SASL_DEFAULT_REALM,
                   SaslRpcServer.SASL_PROPS, new SaslDigestCallbackHandler(
@@ -1261,30 +1145,21 @@ public abstract class Server {
           if (count < 0 || dataLengthBuffer.remaining() > 0) 
             return count;
         }
-        
-        if (!connectionHeaderRead) {
+      
+        if (!rpcHeaderRead) {
           //Every connection is expected to send the header.
-          if (connectionHeaderBuf == null) {
-            connectionHeaderBuf = ByteBuffer.allocate(3);
+          if (rpcHeaderBuffer == null) {
+            rpcHeaderBuffer = ByteBuffer.allocate(2);
           }
-          count = channelRead(channel, connectionHeaderBuf);
-          if (count < 0 || connectionHeaderBuf.remaining() > 0) {
+          count = channelRead(channel, rpcHeaderBuffer);
+          if (count < 0 || rpcHeaderBuffer.remaining() > 0) {
             return count;
           }
-          int version = connectionHeaderBuf.get(0);
-          byte[] method = new byte[] {connectionHeaderBuf.get(1)};
+          int version = rpcHeaderBuffer.get(0);
+          byte[] method = new byte[] {rpcHeaderBuffer.get(1)};
           authMethod = AuthMethod.read(new DataInputStream(
               new ByteArrayInputStream(method)));
-          dataLengthBuffer.flip();
-          
-          // Check if it looks like the user is hitting an IPC port
-          // with an HTTP GET - this is a common error, so we can
-          // send back a simple string indicating as much.
-          if (HTTP_GET_BYTES.equals(dataLengthBuffer)) {
-            setupHttpRequestOnIpcPortResponse();
-            return -1;
-          }
-        
+          dataLengthBuffer.flip();          
           if (!HEADER.equals(dataLengthBuffer) || version != CURRENT_VERSION) {
             //Warning is ok since this is not supposed to happen.
             LOG.warn("Incorrect header or version mismatch from " + 
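In this version of the wire protocol a client opens its connection with a
six-byte preamble: the four-byte "hrpc" magic, one version byte, and one
auth-method byte, which is what the two reads above reassemble. An
illustrative sketch of the client side; the SIMPLE auth code of 80 is an
assumption about SaslRpcServer.AuthMethod:

import java.nio.ByteBuffer;
import java.nio.charset.StandardCharsets;

final class PreambleDemo {
  public static void main(String[] args) {
    ByteBuffer preamble = ByteBuffer.allocate(6);
    preamble.put("hrpc".getBytes(StandardCharsets.US_ASCII));
    preamble.put((byte) 5);   // CURRENT_VERSION in this branch
    preamble.put((byte) 80);  // auth method code (assumed value for SIMPLE)
    System.out.println("preamble bytes = " + preamble.position());  // prints 6
  }
}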
@@ -1294,14 +1169,6 @@ public abstract class Server {
             setupBadVersionResponse(version);
             return -1;
           }
-          
-          IpcSerializationType serializationType = IpcSerializationType
-              .fromByte(connectionHeaderBuf.get(2));
-          if (serializationType != IpcSerializationType.PROTOBUF) {
-            respondUnsupportedSerialization(serializationType);
-            return -1;
-          }
-          
           dataLengthBuffer.clear();
           if (authMethod == null) {
             throw new IOException("Unable to read authentication method");
@@ -1331,8 +1198,8 @@ public abstract class Server {
             useSasl = true;
           }
           
-          connectionHeaderBuf = null;
-          connectionHeaderRead = true;
+          rpcHeaderBuffer = null;
+          rpcHeaderRead = true;
           continue;
         }
         
@@ -1363,7 +1230,7 @@ public abstract class Server {
             skipInitialSaslHandshake = false;
             continue;
           }
-          boolean isHeaderRead = connectionContextRead;
+          boolean isHeaderRead = headerRead;
           if (useSasl) {
             saslReadAndProcess(data.array());
           } else {
@@ -1411,34 +1278,23 @@ public abstract class Server {
         responder.doRespond(fakeCall);
       }
     }
-    
-    private void respondUnsupportedSerialization(IpcSerializationType st) throws IOException {
-      String errMsg = "Server IPC version " + CURRENT_VERSION
-          + " do not support serilization " + st.toString();
-      ByteArrayOutputStream buffer = new ByteArrayOutputStream();
-
-      Call fakeCall = new Call(-1, null, this);
-      setupResponse(buffer, fakeCall, Status.FATAL, null,
-          IpcException.class.getName(), errMsg);
-      responder.doRespond(fakeCall);
-    }
-    
-    private void setupHttpRequestOnIpcPortResponse() throws IOException {
-      Call fakeCall =  new Call(0, null, this);
-      fakeCall.setResponse(ByteBuffer.wrap(
-          RECEIVED_HTTP_REQ_RESPONSE.getBytes()));
-      responder.doRespond(fakeCall);
-    }
 
-    /** Reads the connection context following the connection header */
-    private void processConnectionContext(byte[] buf) throws IOException {
+    /// Reads the connection header following version
+    private void processHeader(byte[] buf) throws IOException {
       DataInputStream in =
         new DataInputStream(new ByteArrayInputStream(buf));
-      connectionContext = IpcConnectionContextProto.parseFrom(in);
-      protocolName = connectionContext.hasProtocol() ? connectionContext
-          .getProtocol() : null;
-
-      UserGroupInformation protocolUser = ProtoUtil.getUgi(connectionContext);
+      header.readFields(in);
+      try {
+        String protocolClassName = header.getProtocol();
+        if (protocolClassName != null) {
+          protocol = getProtocolClass(header.getProtocol(), conf);
+          rpcDetailedMetrics.init(protocol);
+        }
+      } catch (ClassNotFoundException cnfe) {
+        throw new IOException("Unknown protocol: " + header.getProtocol());
+      }
+      
+      UserGroupInformation protocolUser = header.getUgi();
       if (!useSasl) {
         user = protocolUser;
         if (user != null) {
@@ -1512,15 +1368,15 @@ public abstract class Server {
     
     private void processOneRpc(byte[] buf) throws IOException,
         InterruptedException {
-      if (connectionContextRead) {
+      if (headerRead) {
         processData(buf);
       } else {
-        processConnectionContext(buf);
-        connectionContextRead = true;
+        processHeader(buf);
+        headerRead = true;
         if (!authorizeConnection()) {
           throw new AccessControlException("Connection from " + this
-              + " for protocol " + connectionContext.getProtocol()
-              + " is unauthorized for user " + user);      
+              + " for protocol " + header.getProtocol()
+              + " is unauthorized for user " + user);
         }
       }
     }
@@ -1528,43 +1384,18 @@ public abstract class Server {
     private void processData(byte[] buf) throws  IOException, InterruptedException {
       DataInputStream dis =
         new DataInputStream(new ByteArrayInputStream(buf));
-      RpcPayloadHeader header = new RpcPayloadHeader();
-      header.readFields(dis);           // Read the RpcPayload header
+      int id = dis.readInt();                    // try to read an id
         
       if (LOG.isDebugEnabled())
-        LOG.debug(" got #" + header.getCallId());
-      if (header.getOperation() != RpcPayloadOperation.RPC_FINAL_PAYLOAD) {
-        throw new IOException("IPC Server does not implement operation" + 
-              header.getOperation());
-      }
-      // If we know the rpc kind, get its class so that we can deserialize
-      // (Note it would make more sense to have the handler deserialize but 
-      // we continue with this original design.
-      Class<? extends Writable> rpcRequestClass = 
-          getRpcRequestWrapper(header.getkind());
-      if (rpcRequestClass == null) {
-        LOG.warn("Unknown rpc kind "  + header.getkind() + 
-            " from client " + getHostAddress());
-        final Call readParamsFailedCall = 
-            new Call(header.getCallId(), null, this);
-        ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
-
-        setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
-            IOException.class.getName(),
-            "Unknown rpc kind "  + header.getkind());
-        responder.doRespond(readParamsFailedCall);
-        return;   
-      }
-      Writable rpcRequest;
-      try { //Read the rpc request
-        rpcRequest = ReflectionUtils.newInstance(rpcRequestClass, conf);
-        rpcRequest.readFields(dis);
+        LOG.debug(" got #" + id);
+      Writable param;
+      try {
+        param = ReflectionUtils.newInstance(paramClass, conf);//read param
+        param.readFields(dis);
       } catch (Throwable t) {
         LOG.warn("Unable to read call parameters for client " +
-                 getHostAddress() + "on connection protocol " +
-            this.protocolName + " for rpcKind " + header.getkind(),  t);
-        final Call readParamsFailedCall = 
-            new Call(header.getCallId(), null, this);
+                 getHostAddress(), t);
+        final Call readParamsFailedCall = new Call(id, null, this);
         ByteArrayOutputStream responseBuffer = new ByteArrayOutputStream();
 
         setupResponse(responseBuffer, readParamsFailedCall, Status.FATAL, null,
@@ -1574,7 +1405,7 @@ public abstract class Server {
         return;
       }
         
-      Call call = new Call(header.getCallId(), rpcRequest, this, header.getkind());
+      Call call = new Call(id, param, this);
       callQueue.put(call);              // queue the call; maybe blocked here
       incRpcCount();  // Increment the rpc count
     }
@@ -1589,9 +1420,9 @@ public abstract class Server {
             && (authMethod != AuthMethod.DIGEST)) {
           ProxyUsers.authorize(user, this.getHostAddress(), conf);
         }
-        authorize(user, protocolName, getHostInetAddress());
+        authorize(user, header, getHostInetAddress());
         if (LOG.isDebugEnabled()) {
-          LOG.debug("Successfully authorized " + connectionContext);
+          LOG.debug("Successfully authorized " + header);
         }
         rpcMetrics.incrAuthorizationSuccesses();
       } catch (AuthorizationException ae) {
@@ -1636,10 +1467,11 @@ public abstract class Server {
       while (running) {
         try {
           final Call call = callQueue.take(); // pop the queue; maybe blocked here
-          if (LOG.isDebugEnabled()) {
-            LOG.debug(getName() + ": has Call#" + call.callId + 
-                "for RpcKind " + call.rpcKind + " from " + call.connection);
-          }
+
+          if (LOG.isDebugEnabled())
+            LOG.debug(getName() + ": has #" + call.id + " from " +
+                      call.connection);
+          
           String errorClass = null;
           String errorClass = null;
           String error = null;
           String error = null;
           Writable value = null;
           Writable value = null;
@@ -1649,7 +1481,7 @@ public abstract class Server {
            // Make the call as the user via Subject.doAs, thus associating
            // the call with the Subject
            if (call.connection.user == null) {
-              value = call(call.rpcKind, call.connection.protocolName, call.rpcRequest, 
+              value = call(call.connection.protocol, call.param, 
                           call.timestamp);
            } else {
              value = 
@@ -1658,28 +1490,15 @@ public abstract class Server {
                     @Override
                     public Writable run() throws Exception {
                       // make the call
-                       return call(call.rpcKind, call.connection.protocolName, 
-                                   call.rpcRequest, call.timestamp);
+                       return call(call.connection.protocol, 
+                                   call.param, call.timestamp);

                     }
                   }
                  );
            }
          } catch (Throwable e) {
-            String logMsg = getName() + ", call " + call + ": error: " + e;
-            if (e instanceof RuntimeException || e instanceof Error) {
-              // These exception types indicate something is probably wrong
-              // on the server side, as opposed to just a normal exceptional
-              // result.
-              LOG.warn(logMsg, e);
-            } else if (e instanceof StandbyException) {
-              // Don't log the whole stack trace of these exceptions.
-              // Way too noisy!
-              LOG.info(logMsg);
-            } else {
-              LOG.info(logMsg, e);
-            }
-
+            LOG.info(getName() + ", call: " + call + ", error: ", e);
            errorClass = e.getClass().getName();
            error = StringUtils.stringifyException(e);
            // Remove redundant error class name from the beginning of the stack trace
@@ -1724,33 +1543,24 @@ public abstract class Server {
                  Configuration conf)
    throws IOException 
  {
-    this(bindAddress, port, paramClass, handlerCount, -1, -1, conf, Integer
-        .toString(port), null);
+    this(bindAddress, port, paramClass, handlerCount, -1, -1, conf, Integer.toString(port), null);
  }
  
-  /** 
-   * Constructs a server listening on the named port and address.  Parameters passed must
+  /** Constructs a server listening on the named port and address.  Parameters passed must
   * be of the named class.  The <code>handlerCount</code> determines
   * the number of handler threads that will be used to process calls.
   * If queueSizePerHandler or numReaders are not -1 they will be used instead of parameters
   * from configuration. Otherwise the configuration will be picked up.
-   * 
-   * If rpcRequestClass is null then the rpcRequestClass must have been 
-   * registered via {@link #registerProtocolEngine(RpcPayloadHeader.RpcKind,
-   *  Class, RPC.RpcInvoker)}
-   * This parameter has been retained for compatibility with existing tests
-   * and usage.
   */
  @SuppressWarnings("unchecked")
-  protected Server(String bindAddress, int port,
-      Class<? extends Writable> rpcRequestClass, int handlerCount,
-      int numReaders, int queueSizePerHandler, Configuration conf,
-      String serverName, SecretManager<? extends TokenIdentifier> secretManager)
+  protected Server(String bindAddress, int port, 
+                  Class<? extends Writable> paramClass, int handlerCount, int numReaders, int queueSizePerHandler,
+                  Configuration conf, String serverName, SecretManager<? extends TokenIdentifier> secretManager) 
    throws IOException {
    this.bindAddress = bindAddress;
    this.conf = conf;
    this.port = port;
-    this.rpcRequestClass = rpcRequestClass; 
+    this.paramClass = paramClass;
    this.handlerCount = handlerCount;
    this.socketSendBufferSize = 0;
    if (queueSizePerHandler != -1) {
@@ -1831,7 +1641,7 @@ public abstract class Server {
  throws IOException {
    response.reset();
    DataOutputStream out = new DataOutputStream(response);
-    out.writeInt(call.callId);                // write call id
+    out.writeInt(call.id);                // write call id
    out.writeInt(status.state);           // write status

    if (status == Status.SUCCESS) {
@@ -1948,38 +1758,37 @@ public abstract class Server {
  
  /** 
   * Called for each call. 
-   * @deprecated Use  {@link #call(RpcPayloadHeader.RpcKind, String,
-   *  Writable, long)} instead
+   * @deprecated Use {@link #call(Class, Writable, long)} instead
   */
  @Deprecated
-  public Writable call(Writable param, long receiveTime) throws Exception {
-    return call(RpcKind.RPC_BUILTIN, null, param, receiveTime);
+  public Writable call(Writable param, long receiveTime) throws IOException {
+    return call(null, param, receiveTime);
  }
  
  /** Called for each call. */
-  public abstract Writable call(RpcKind rpcKind, String protocol,
-      Writable param, long receiveTime) throws Exception;
+  public abstract Writable call(Class<?> protocol,
+                               Writable param, long receiveTime)
+  throws IOException;
  
  /**
   * Authorize the incoming client connection.
   * 
   * @param user client user
-   * @param protocolName - the protocol
+   * @param connection incoming connection
   * @param addr InetAddress of incoming connection
   * @throws AuthorizationException when the client isn't authorized to talk the protocol
   */
-  private void authorize(UserGroupInformation user, String protocolName,
-      InetAddress addr) throws AuthorizationException {
+  public void authorize(UserGroupInformation user, 
+                        ConnectionHeader connection,
+                        InetAddress addr
+                        ) throws AuthorizationException {
    if (authorize) {
-      if (protocolName == null) {
-        throw new AuthorizationException("Null protocol not authorized");
-      }
      Class<?> protocol = null;
      try {
-        protocol = getProtocolClass(protocolName, getConf());
+        protocol = getProtocolClass(connection.getProtocol(), getConf());
      } catch (ClassNotFoundException cfne) {
        throw new AuthorizationException("Unknown protocol: " + 
-                                         protocolName);
+                                         connection.getProtocol());
      }
      serviceAuthorizationManager.authorize(user, protocol, getConf(), addr);
    }
@@ -2109,5 +1918,5 @@ public abstract class Server {

    int nBytes = initialRemaining - buf.remaining(); 
    return (nBytes > 0) ? nBytes : ret;
-  }
+  }      
}
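Note: this revert collapses Server's dispatch back to a single abstract call(Class, Writable, long), dropping the rpcKind/protocolName routing above. A minimal sketch of a server built on that reverted API — EchoServer, the handler count, and the LongWritable payload are illustrative placeholders (not part of this commit), and the short constructor used here is the one delegating to the full form shown above:

    import java.io.IOException;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.io.LongWritable;
    import org.apache.hadoop.io.Writable;

    public class EchoServer extends org.apache.hadoop.ipc.Server {
      public EchoServer(String bindAddress, int port, Configuration conf)
          throws IOException {
        super(bindAddress, port, LongWritable.class, 2, conf); // 2 handler threads
      }

      @Override
      public Writable call(Class<?> protocol, Writable param, long receiveTime)
          throws IOException {
        return param; // echo every request back unchanged
      }
    }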

+ 1 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/StandbyException.java

@@ -17,8 +17,6 @@
 */
package org.apache.hadoop.ipc;

-import java.io.IOException;
-
import org.apache.hadoop.classification.InterfaceStability;

/**
@@ -26,7 +24,7 @@ import org.apache.hadoop.classification.InterfaceStability;
 * set of servers in which only a subset may be active.
 */
@InterfaceStability.Evolving
-public class StandbyException extends IOException {
+public class StandbyException extends Exception {
  static final long serialVersionUID = 0x12308AD010L;
  public StandbyException(String msg) {
    super(msg);
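With StandbyException reverted to extend Exception rather than IOException, it no longer travels through catch (IOException) paths; a hedged caller sketch (invokeOperation is a hypothetical RPC call, not an API from this commit):

    try {
      invokeOperation(); // hypothetical call that may reach a standby server
    } catch (StandbyException se) {
      // retry against another server in the set; only a subset may be active
    } catch (IOException ioe) {
      throw ioe; // StandbyException no longer arrives on this path
    }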

+ 1 - 0
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/VersionedProtocol.java

@@ -34,6 +34,7 @@ public interface VersionedProtocol {
   * @return the version that the server will speak
   * @throws IOException if any IO error occurs
   */
+  @Deprecated
  public long getProtocolVersion(String protocol,
                                 long clientVersion) throws IOException;
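For context, protocols built on VersionedProtocol conventionally expose their version as a public static versionID field, which the reverted WritableRpcEngine below reads reflectively; a sketch in which PingProtocol and its method are illustrative only:

    import java.io.IOException;
    import org.apache.hadoop.io.Text;
    import org.apache.hadoop.ipc.VersionedProtocol;

    public interface PingProtocol extends VersionedProtocol {
      long versionID = 1L; // located via getField("versionID") during Invocation setup
      Text ping(Text message) throws IOException;
    }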

+ 121 - 253
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/ipc/WritableRpcEngine.java

@@ -18,23 +18,23 @@

package org.apache.hadoop.ipc;

+import java.lang.reflect.Field;
import java.lang.reflect.Proxy;
import java.lang.reflect.Method;
import java.lang.reflect.Array;
+import java.lang.reflect.InvocationHandler;
import java.lang.reflect.InvocationTargetException;

import java.net.InetSocketAddress;
import java.io.*;
+import java.util.Map;
+import java.util.HashMap;

import javax.net.SocketFactory;

import org.apache.commons.logging.*;

import org.apache.hadoop.io.*;
-import org.apache.hadoop.ipc.Client.ConnectionId;
-import org.apache.hadoop.ipc.RPC.RpcInvoker;
-import org.apache.hadoop.ipc.RpcPayloadHeader.RpcKind;
-import org.apache.hadoop.ipc.VersionedProtocol;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.security.token.SecretManager;
import org.apache.hadoop.security.token.TokenIdentifier;
@@ -49,38 +49,8 @@ public class WritableRpcEngine implements RpcEngine {
  
  //writableRpcVersion should be updated if there is a change
  //in format of the rpc messages.
-  
-  // 2L - added declared class to Invocation
-  public static final long writableRpcVersion = 2L;
-  
-  /**
-   * Whether or not this class has been initialized.
-   */
-  private static boolean isInitialized = false;
-  
-  static { 
-    ensureInitialized();
-  }
-  
-  /**
-   * Initialize this class if it isn't already.
-   */
-  public static synchronized void ensureInitialized() {
-    if (!isInitialized) {
-      initialize();
-    }
-  }
-  
-  /**
-   * Register the rpcRequest deserializer for WritableRpcEngine
-   */
-  private static synchronized void initialize() {
-    org.apache.hadoop.ipc.Server.registerProtocolEngine(RpcKind.RPC_WRITABLE,
-        Invocation.class, new Server.WritableRpcInvoker());
-    isInitialized = true;
-  }
+  public static long writableRpcVersion = 1L;

-  
  /** A method invocation, including the method name and its parameters.*/
  private static class Invocation implements Writable, Configurable {
    private String methodName;
@@ -89,13 +59,11 @@ public class WritableRpcEngine implements RpcEngine {
    private Configuration conf;
    private long clientVersion;
    private int clientMethodsHash;
-    private String declaringClassProtocolName;
    
    //This could be different from static writableRpcVersion when received
    //at server, if client is using a different version.
    private long rpcVersion;

-    @SuppressWarnings("unused") // called when deserializing an invocation
    public Invocation() {}

    public Invocation(Method method, Object[] parameters) {
@@ -108,12 +76,18 @@ public class WritableRpcEngine implements RpcEngine {
        clientVersion = 0;
        clientMethodsHash = 0;
      } else {
-        this.clientVersion = RPC.getProtocolVersion(method.getDeclaringClass());
+        try {
+          Field versionField = method.getDeclaringClass().getField("versionID");
+          versionField.setAccessible(true);
+          this.clientVersion = versionField.getLong(method.getDeclaringClass());
+        } catch (NoSuchFieldException ex) {
+          throw new RuntimeException(ex);
+        } catch (IllegalAccessException ex) {
+          throw new RuntimeException(ex);
+        }
        this.clientMethodsHash = ProtocolSignature.getFingerprint(method
            .getDeclaringClass().getMethods());
      }
-      this.declaringClassProtocolName = 
-          RPC.getProtocolName(method.getDeclaringClass());
    }

    /** The name of the method invoked. */
@@ -129,7 +103,6 @@ public class WritableRpcEngine implements RpcEngine {
      return clientVersion;
    }

-    @SuppressWarnings("unused")
    private int getClientMethodsHash() {
      return clientMethodsHash;
    }
@@ -142,10 +115,8 @@ public class WritableRpcEngine implements RpcEngine {
      return rpcVersion;
    }

-    @SuppressWarnings("deprecation")
    public void readFields(DataInput in) throws IOException {
      rpcVersion = in.readLong();
-      declaringClassProtocolName = UTF8.readString(in);
      methodName = UTF8.readString(in);
      clientVersion = in.readLong();
      clientMethodsHash = in.readInt();
@@ -153,16 +124,13 @@ public class WritableRpcEngine implements RpcEngine {
      parameterClasses = new Class[parameters.length];
      ObjectWritable objectWritable = new ObjectWritable();
      for (int i = 0; i < parameters.length; i++) {
-        parameters[i] = 
-            ObjectWritable.readObject(in, objectWritable, this.conf);
+        parameters[i] = ObjectWritable.readObject(in, objectWritable, this.conf);
        parameterClasses[i] = objectWritable.getDeclaredClass();
      }
    }

-    @SuppressWarnings("deprecation")
    public void write(DataOutput out) throws IOException {
      out.writeLong(rpcVersion);
-      UTF8.writeString(out, declaringClassProtocolName);
      UTF8.writeString(out, methodName);
      out.writeLong(clientVersion);
      out.writeInt(clientMethodsHash);
@@ -201,7 +169,7 @@ public class WritableRpcEngine implements RpcEngine {

  private static ClientCache CLIENTS=new ClientCache();
  
-  private static class Invoker implements RpcInvocationHandler {
+  private static class Invoker implements InvocationHandler {
    private Client.ConnectionId remoteId;
    private Client client;
    private boolean isClosed = false;
@@ -223,7 +191,7 @@ public class WritableRpcEngine implements RpcEngine {
      }

      ObjectWritable value = (ObjectWritable)
-        client.call(RpcKind.RPC_WRITABLE, new Invocation(method, args), remoteId);
+        client.call(new Invocation(method, args), remoteId);
      if (LOG.isDebugEnabled()) {
        long callTime = System.currentTimeMillis() - startTime;
        LOG.debug("Call: " + method.getName() + " " + callTime);
@@ -232,17 +200,12 @@ public class WritableRpcEngine implements RpcEngine {
    }
    
    /* close the IPC client that's responsible for this invoker's RPCs */ 
-    synchronized public void close() {
+    synchronized private void close() {
      if (!isClosed) {
        isClosed = true;
        CLIENTS.stopClient(client);
      }
    }
-
-    @Override
-    public ConnectionId getConnectionId() {
-      return remoteId;
-    }
  }
  
  // for unit testing only
@@ -268,6 +231,15 @@ public class WritableRpcEngine implements RpcEngine {
            factory, rpcTimeout));
    return new ProtocolProxy<T>(protocol, proxy, true);
  }
+
+  /**
+   * Stop this proxy and release its invoker's resource
+   * @param proxy the proxy to be stopped
+   */
+  public void stopProxy(Object proxy) {
+    ((Invoker)Proxy.getInvocationHandler(proxy)).close();
+  }
+

  /** Expert: Make multiple, parallel calls to a set of servers. */
  public Object[] call(Method method, Object[][] params,
@@ -301,238 +273,134 @@

  /** Construct a server for a protocol implementation instance listening on a
   * port and address. */
-  public RPC.Server getServer(Class<?> protocolClass,
-                      Object protocolImpl, String bindAddress, int port,
-                      int numHandlers, int numReaders, int queueSizePerHandler,
-                      boolean verbose, Configuration conf,
+  public Server getServer(Class<?> protocol,
+                          Object instance, String bindAddress, int port,
+                          int numHandlers, int numReaders, int queueSizePerHandler,
+                          boolean verbose, Configuration conf,
                      SecretManager<? extends TokenIdentifier> secretManager) 
    throws IOException {
-    return new Server(protocolClass, protocolImpl, conf, bindAddress, port,
-        numHandlers, numReaders, queueSizePerHandler, verbose, secretManager);
+    return new Server(instance, conf, bindAddress, port, numHandlers, 
+        numReaders, queueSizePerHandler, verbose, secretManager);
  }

-
  /** An RPC Server. */
  public static class Server extends RPC.Server {
-    /**
-     * Construct an RPC server.
-     * @param instance the instance whose methods will be called
-     * @param conf the configuration to use
-     * @param bindAddress the address to bind on to listen for connection
-     * @param port the port to listen for connections on
-     * 
-     * @deprecated Use #Server(Class, Object, Configuration, String, int)    
-     */
-    @Deprecated
-    public Server(Object instance, Configuration conf, String bindAddress,
-        int port) throws IOException {
-      this(null, instance, conf,  bindAddress, port);
-    }
-    
-    
+    private Object instance;
+    private boolean verbose;
+
    /** Construct an RPC server.
-     * @param protocolClass class
-     * @param protocolImpl the instance whose methods will be called
+     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     */
-    public Server(Class<?> protocolClass, Object protocolImpl, 
-        Configuration conf, String bindAddress, int port) 
+    public Server(Object instance, Configuration conf, String bindAddress, int port) 
      throws IOException {
-      this(protocolClass, protocolImpl, conf,  bindAddress, port, 1, -1, -1,
-          false, null);
+      this(instance, conf,  bindAddress, port, 1, -1, -1, false, null);
    }
    
-    /** 
-     * Construct an RPC server.
-     * @param protocolImpl the instance whose methods will be called
-     * @param conf the configuration to use
-     * @param bindAddress the address to bind on to listen for connection
-     * @param port the port to listen for connections on
-     * @param numHandlers the number of method handler threads to run
-     * @param verbose whether each call should be logged
-     * 
-     * @deprecated use Server#Server(Class, Object, 
-     *      Configuration, String, int, int, int, int, boolean, SecretManager)
-     */
-    @Deprecated
-    public Server(Object protocolImpl, Configuration conf, String bindAddress,
-        int port, int numHandlers, int numReaders, int queueSizePerHandler,
-        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager) 
-            throws IOException {
-       this(null, protocolImpl,  conf,  bindAddress,   port,
-                   numHandlers,  numReaders,  queueSizePerHandler,  verbose, 
-                   secretManager);
-   
+    private static String classNameBase(String className) {
+      String[] names = className.split("\\.", -1);
+      if (names == null || names.length == 0) {
+        return className;
+      }
+      return names[names.length-1];
    }
    
-    /** 
-     * Construct an RPC server.
-     * @param protocolClass - the protocol being registered
-     *     can be null for compatibility with old usage (see below for details)
-     * @param protocolImpl the protocol impl that will be called
+    /** Construct an RPC server.
+     * @param instance the instance whose methods will be called
     * @param conf the configuration to use
     * @param bindAddress the address to bind on to listen for connection
     * @param port the port to listen for connections on
     * @param numHandlers the number of method handler threads to run
     * @param verbose whether each call should be logged
     */
-    public Server(Class<?> protocolClass, Object protocolImpl,
-        Configuration conf, String bindAddress,  int port,
-        int numHandlers, int numReaders, int queueSizePerHandler, 
-        boolean verbose, SecretManager<? extends TokenIdentifier> secretManager) 
+    public Server(Object instance, Configuration conf, String bindAddress,  int port,
+                  int numHandlers, int numReaders, int queueSizePerHandler, boolean verbose, 
+                  SecretManager<? extends TokenIdentifier> secretManager) 
        throws IOException {
-      super(bindAddress, port, null, numHandlers, numReaders,
+      super(bindAddress, port, Invocation.class, numHandlers, numReaders,
          queueSizePerHandler, conf,
-          classNameBase(protocolImpl.getClass().getName()), secretManager);
-
+          classNameBase(instance.getClass().getName()), secretManager);
+      this.instance = instance;
      this.verbose = verbose;
-      
-      
-      Class<?>[] protocols;
-      if (protocolClass == null) { // derive protocol from impl
-        /*
-         * In order to remain compatible with the old usage where a single
-         * target protocolImpl is suppled for all protocol interfaces, and
-         * the protocolImpl is derived from the protocolClass(es) 
-         * we register all interfaces extended by the protocolImpl
-         */
-        protocols = RPC.getProtocolInterfaces(protocolImpl.getClass());
-
-      } else {
-        if (!protocolClass.isAssignableFrom(protocolImpl.getClass())) {
-          throw new IOException("protocolClass "+ protocolClass +
-              " is not implemented by protocolImpl which is of class " +
-              protocolImpl.getClass());
-        }
-        // register protocol class and its super interfaces
-        registerProtocolAndImpl(RpcKind.RPC_WRITABLE, protocolClass, protocolImpl);
-        protocols = RPC.getProtocolInterfaces(protocolClass);
-      }
-      for (Class<?> p : protocols) {
-        if (!p.equals(VersionedProtocol.class)) {
-          registerProtocolAndImpl(RpcKind.RPC_WRITABLE, p, protocolImpl);
-        }
-      }
-
    }

-    private static void log(String value) {
-      if (value!= null && value.length() > 55)
-        value = value.substring(0, 55)+"...";
-      LOG.info(value);
-    }
-    
-    static class WritableRpcInvoker implements RpcInvoker {
-
-     @Override
-      public Writable call(org.apache.hadoop.ipc.RPC.Server server,
-          String protocolName, Writable rpcRequest, long receivedTime)
-          throws IOException {
-        try {
-          Invocation call = (Invocation)rpcRequest;
-          if (server.verbose) log("Call: " + call);
-
-          // Verify rpc version
-          if (call.getRpcVersion() != writableRpcVersion) {
-            // Client is using a different version of WritableRpc
-            throw new IOException(
-                "WritableRpc version mismatch, client side version="
-                    + call.getRpcVersion() + ", server side version="
-                    + writableRpcVersion);
-          }
-
+    public Writable call(Class<?> protocol, Writable param, long receivedTime) 
+    throws IOException {
+      try {
+        Invocation call = (Invocation)param;
+        if (verbose) log("Call: " + call);
+
+        Method method = protocol.getMethod(call.getMethodName(),
+                                           call.getParameterClasses());
+        method.setAccessible(true);
+
+        // Verify rpc version
+        if (call.getRpcVersion() != writableRpcVersion) {
+          // Client is using a different version of WritableRpc
+          throw new IOException(
+              "WritableRpc version mismatch, client side version="
+                  + call.getRpcVersion() + ", server side version="
+                  + writableRpcVersion);
+        }
+        
+        //Verify protocol version.
+        //Bypass the version check for VersionedProtocol
+        if (!method.getDeclaringClass().equals(VersionedProtocol.class)) {
          long clientVersion = call.getProtocolVersion();
-          final String protoName;
-          ProtoClassProtoImpl protocolImpl;
-          if (call.declaringClassProtocolName.equals(VersionedProtocol.class.getName())) {
-            // VersionProtocol methods are often used by client to figure out
-            // which version of protocol to use.
-            //
-            // Versioned protocol methods should go the protocolName protocol
-            // rather than the declaring class of the method since the
-            // the declaring class is VersionedProtocol which is not 
-            // registered directly.
-            // Send the call to the highest  protocol version
-            VerProtocolImpl highest = server.getHighestSupportedProtocol(
-                RpcKind.RPC_WRITABLE, protocolName);
-            if (highest == null) {
-              throw new IOException("Unknown protocol: " + protocolName);
-            }
-            protocolImpl = highest.protocolTarget;
-          } else {
-            protoName = call.declaringClassProtocolName;
-
-            // Find the right impl for the protocol based on client version.
-            ProtoNameVer pv = 
-                new ProtoNameVer(call.declaringClassProtocolName, clientVersion);
-            protocolImpl = 
-                server.getProtocolImplMap(RpcKind.RPC_WRITABLE).get(pv);
-            if (protocolImpl == null) { // no match for Protocol AND Version
-               VerProtocolImpl highest = 
-                   server.getHighestSupportedProtocol(RpcKind.RPC_WRITABLE, 
-                       protoName);
-              if (highest == null) {
-                throw new IOException("Unknown protocol: " + protoName);
-              } else { // protocol supported but not the version that client wants
-                throw new RPC.VersionMismatch(protoName, clientVersion,
-                  highest.version);
-              }
-            }
-          }
-          
-
-          // Invoke the protocol method
-
-          long startTime = System.currentTimeMillis();
-          Method method = 
-              protocolImpl.protocolClass.getMethod(call.getMethodName(),
-              call.getParameterClasses());
-          method.setAccessible(true);
-          server.rpcDetailedMetrics.init(protocolImpl.protocolClass);
-          Object value = 
-              method.invoke(protocolImpl.protocolImpl, call.getParameters());
-          int processingTime = (int) (System.currentTimeMillis() - startTime);
-          int qTime = (int) (startTime-receivedTime);
-          if (LOG.isDebugEnabled()) {
-            LOG.debug("Served: " + call.getMethodName() +
-                      " queueTime= " + qTime +
-                      " procesingTime= " + processingTime);
+          ProtocolSignature serverInfo = ((VersionedProtocol) instance)
+              .getProtocolSignature(protocol.getCanonicalName(), call
+                  .getProtocolVersion(), call.getClientMethodsHash());
+          long serverVersion = serverInfo.getVersion();
+          if (serverVersion != clientVersion) {
+            LOG.warn("Version mismatch: client version=" + clientVersion
+                + ", server version=" + serverVersion);
+            throw new RPC.VersionMismatch(protocol.getName(), clientVersion,
+                serverVersion);
          }
-          server.rpcMetrics.addRpcQueueTime(qTime);
-          server.rpcMetrics.addRpcProcessingTime(processingTime);
-          server.rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
-                                               processingTime);
-          if (server.verbose) log("Return: "+value);
-
-          return new ObjectWritable(method.getReturnType(), value);
-
-        } catch (InvocationTargetException e) {
-          Throwable target = e.getTargetException();
-          if (target instanceof IOException) {
-            throw (IOException)target;
-          } else {
-            IOException ioe = new IOException(target.toString());
-            ioe.setStackTrace(target.getStackTrace());
-            throw ioe;
-          }
-        } catch (Throwable e) {
-          if (!(e instanceof IOException)) {
-            LOG.error("Unexpected throwable object ", e);
-          }
-          IOException ioe = new IOException(e.toString());
-          ioe.setStackTrace(e.getStackTrace());
+        }
+
+        long startTime = System.currentTimeMillis();
+        Object value = method.invoke(instance, call.getParameters());
+        int processingTime = (int) (System.currentTimeMillis() - startTime);
+        int qTime = (int) (startTime-receivedTime);
+        if (LOG.isDebugEnabled()) {
+          LOG.debug("Served: " + call.getMethodName() +
+                    " queueTime= " + qTime +
+                    " processingTime= " + processingTime);
+        }
+        rpcMetrics.addRpcQueueTime(qTime);
+        rpcMetrics.addRpcProcessingTime(processingTime);
+        rpcDetailedMetrics.addProcessingTime(call.getMethodName(),
+                                             processingTime);
+        if (verbose) log("Return: "+value);
+
+        return new ObjectWritable(method.getReturnType(), value);
+
+      } catch (InvocationTargetException e) {
+        Throwable target = e.getTargetException();
+        if (target instanceof IOException) {
+          throw (IOException)target;
+        } else {
+          IOException ioe = new IOException(target.toString());
+          ioe.setStackTrace(target.getStackTrace());
          throw ioe;
        }
+      } catch (Throwable e) {
+        if (!(e instanceof IOException)) {
+          LOG.error("Unexpected throwable object ", e);
+        }
+        IOException ioe = new IOException(e.toString());
+        ioe.setStackTrace(e.getStackTrace());
+        throw ioe;
      }
    }
  }
 
-  @Override
-  public ProtocolProxy<ProtocolMetaInfoPB> getProtocolMetaInfoProxy(
-      ConnectionId connId, Configuration conf, SocketFactory factory)
-      throws IOException {
-    throw new UnsupportedOperationException("This proxy is not supported");
+  private static void log(String value) {
+    if (value!= null && value.length() > 55)
+      value = value.substring(0, 55)+"...";
+    LOG.info(value);
   }
   }
}
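A hedged client-side sketch against the reverted engine, reusing the hypothetical PingProtocol defined earlier; the address and port are placeholders, and RPC.stopProxy is assumed to delegate to the engine's restored stopProxy, which closes the Invoker:

    PingProtocol proxy = RPC.getProxy(PingProtocol.class, PingProtocol.versionID,
        new InetSocketAddress("localhost", 8020), new Configuration());
    try {
      proxy.ping(new Text("hello"));
    } finally {
      RPC.stopProxy(proxy); // releases the cached Client connection
    }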

+ 1 - 10
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/metrics2/util/MBeans.java

@@ -18,8 +18,6 @@
package org.apache.hadoop.metrics2.util;

import java.lang.management.ManagementFactory;
-
-import javax.management.InstanceAlreadyExistsException;
import javax.management.MBeanServer;
import javax.management.ObjectName;

@@ -57,15 +55,8 @@ public class MBeans {
      mbs.registerMBean(theMbean, name);
      LOG.debug("Registered "+ name);
      return name;
-    } catch (InstanceAlreadyExistsException iaee) {
-      if (LOG.isTraceEnabled()) {
-        LOG.trace("Failed to register MBean \""+ name + "\"", iaee);
-      } else {
-        LOG.warn("Failed to register MBean \""+ name
-            + "\": Instance already exists.");
-      }
    } catch (Exception e) {
-      LOG.warn("Failed to register MBean \""+ name + "\"", e);
+      LOG.warn("Error registering "+ name, e);
    }
    return null;
  }
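After this change MBeans.register logs any failure (including an already-registered instance) uniformly and returns null rather than distinguishing the cases; a usage sketch with placeholder names ("ExampleService", "ExampleSource", mbeanInstance):

    ObjectName name = MBeans.register("ExampleService", "ExampleSource", mbeanInstance);
    if (name == null) {
      // registration failed; a warning has already been logged, nothing is thrown
    }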

+ 27 - 64
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/DNS.java

@@ -90,83 +90,48 @@ public class DNS {
    return attribute.get("PTR").get().toString();
  }

-  /**
-   * @return NetworkInterface for the given subinterface name (eg eth0:0)
-   *    or null if no interface with the given name can be found  
-   */
-  private static NetworkInterface getSubinterface(String strInterface)
-      throws SocketException {
-    Enumeration<NetworkInterface> nifs = 
-      NetworkInterface.getNetworkInterfaces();
-      
-    while (nifs.hasMoreElements()) {
-      Enumeration<NetworkInterface> subNifs = 
-        nifs.nextElement().getSubInterfaces();
-
-      while (subNifs.hasMoreElements()) {
-        NetworkInterface nif = subNifs.nextElement();
-        if (nif.getName().equals(strInterface)) {
-          return nif;
-        }
-      }
-    }
-    return null;
-  }
-
  /**
   * Returns all the IPs associated with the provided interface, if any, in
   * textual form.
   * 
   * @param strInterface
-   *            The name of the network interface or sub-interface to query
-   *            (eg eth0 or eth0:0) or the string "default"
+   *            The name of the network interface to query (e.g. eth0)
   * @return A string vector of all the IPs associated with the provided
-   *         interface. The local host IP is returned if the interface
-   *         name "default" is specified or there is an I/O error looking
-   *         for the given interface.
+   *         interface
   * @throws UnknownHostException
-   *             If the given interface is invalid
+   *             If an UnknownHostException is encountered in querying the
+   *             default interface
   * 
   */
  public static String[] getIPs(String strInterface)
    throws UnknownHostException {
-    if ("default".equals(strInterface)) {
-      return new String[] { cachedHostAddress };
-    }
-    NetworkInterface netIf;
    try {
-      netIf = NetworkInterface.getByName(strInterface);
-      if (netIf == null) {
-        netIf = getSubinterface(strInterface);
+      NetworkInterface netIF = NetworkInterface.getByName(strInterface);
+      if (netIF == null) {
+        return new String[] { cachedHostAddress };
+      } else {
+        Vector<String> ips = new Vector<String>();
+        Enumeration e = netIF.getInetAddresses();
+        while (e.hasMoreElements()) {
+          ips.add(((InetAddress) e.nextElement()).getHostAddress());
+        }
+        return ips.toArray(new String[] {});
      }
    } catch (SocketException e) {
-      LOG.warn("I/O error finding interface " + strInterface +
-          ": " + e.getMessage());
-      return new String[] { cachedHostAddress };
-    }
-    if (netIf == null) {
-      throw new UnknownHostException("No such interface " + strInterface);
+      return new String[]  { cachedHostAddress };
    }
-    Vector<String> ips = new Vector<String>();
-    Enumeration<InetAddress> addrs = netIf.getInetAddresses();
-    while (addrs.hasMoreElements()) {
-      ips.add(addrs.nextElement().getHostAddress());
-    }
-    return ips.toArray(new String[] {});
   }
  }



-  /**
+    /**
   * Returns the first available IP address associated with the provided
-   * network interface or the local host IP if "default" is given.
+   * network interface
   *
   * @param strInterface
-   *             (e.g. eth0 or eth0:0) or the string "default"
-   * @return The IP address in text form, the local host IP is returned
-   *         if the interface name "default" is specified
+   *            The name of the network interface to query (e.g. eth0)
+   * @return The IP address in text form
    * @throws UnknownHostException
   * @throws UnknownHostException
-   *             If the given interface is invalid
+   *             If one is encountered in querying the default interface
   */
  public static String getDefaultIP(String strInterface)
    throws UnknownHostException {
    * address bound to the specified network interface
   * address bound to the specified network interface
   *
   * @param strInterface
-   *            The name of the network interface or subinterface to query
-   *            (e.g. eth0 or eth0:0)
+   *            The name of the network interface to query (e.g. eth0)
   * @param nameserver
   *            The DNS host name
   * @return A string vector of all host names associated with the IPs tied to
   *         the specified interface
+   * @throws UnknownHostException if the hostname cannot be determined
    */
   */
  public static String[] getHosts(String strInterface, String nameserver)
    throws UnknownHostException {
    String[] ips = getIPs(strInterface);
    Vector<String> hosts = new Vector<String>();
-    for (int ctr = 0; ctr < ips.length; ctr++) {
+    for (int ctr = 0; ctr < ips.length; ctr++)
      try {
        hosts.add(reverseDns(InetAddress.getByName(ips[ctr]),
                             nameserver));
      } catch (UnknownHostException ignored) {
      } catch (NamingException ignored) {
      }
+
     if (hosts.isEmpty()) {
    if (hosts.isEmpty()) {
-      LOG.warn("Unable to determine hostname for interface " + strInterface);
      return new String[] { cachedHostname };
    } else {
       return hosts.toArray(new String[hosts.size()]);
@@ -218,8 +181,8 @@ public class DNS {
    try {
      localhost = InetAddress.getLocalHost().getCanonicalHostName();
    } catch (UnknownHostException e) {
-      LOG.warn("Unable to determine local hostname "
-          + "-falling back to \"" + LOCALHOST + "\"", e);
+      LOG.info("Unable to determine local hostname "
+              + "-falling back to \"" + LOCALHOST + "\"", e);
      localhost = LOCALHOST;
    }
    return localhost;
@@ -241,7 +204,7 @@ public class DNS {
      try {
        address = InetAddress.getLocalHost().getHostAddress();
      } catch (UnknownHostException e) {
-        LOG.warn("Unable to determine address of the host"
+        LOG.info("Unable to determine address of the host"
                + "-falling back to \"" + LOCALHOST + "\" address", e);
        try {
          address = InetAddress.getByName(LOCALHOST).getHostAddress();
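The reverted DNS helpers no longer understand sub-interfaces or the "default" keyword and fall back to the cached local address on lookup failure rather than throwing. A hedged usage sketch (interface name and nameserver are placeholders):

    String[] ips = DNS.getIPs("eth0");                 // all addresses on eth0
    String ip = DNS.getDefaultIP("eth0");              // first address on eth0
    String[] hosts = DNS.getHosts("eth0", "8.8.8.8");  // reverse-resolved host names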

+ 0 - 7
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetUtils.java

@@ -606,13 +606,6 @@ public class NetUtils {
    catch(UnknownHostException uhe) {return "" + uhe;}
  }
  
-  /**
-   * Compose a "host:port" string from the address.
-   */
-  public static String getHostPortString(InetSocketAddress addr) {
-    return addr.getHostName() + ":" + addr.getPort();
-  }
-  
  /**
   * Checks if {@code host} is a local host name and return {@link InetAddress}
   * corresponding to that address.
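The removed getHostPortString helper was a one-liner; callers on this branch can inline the equivalent:

    InetSocketAddress addr = new InetSocketAddress("namenode.example.com", 8020);
    String hostPort = addr.getHostName() + ":" + addr.getPort(); // "namenode.example.com:8020"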

+ 0 - 23
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/NetworkTopology.java

@@ -45,13 +45,6 @@ public class NetworkTopology {
  public static final Log LOG = 
    LogFactory.getLog(NetworkTopology.class);
    
-  public static class InvalidTopologyException extends RuntimeException {
-    private static final long serialVersionUID = 1L;
-    public InvalidTopologyException(String msg) {
-      super(msg);
-    }
-  }
-
  /** InnerNode represents a switch/router of a data center or rack.
   * Different from a leaf node, it has non-null children.
   */
@@ -318,8 +311,6 @@ public class NetworkTopology {
   * the root cluster map
   */
  InnerNode clusterMap = new InnerNode(InnerNode.ROOT);
-  /** Depth of all leaf nodes */
-  private int depthOfAllLeaves = -1;
  /** rack counter */
  private int numOfRacks = 0;
  /** the lock used to manage access */
@@ -337,7 +328,6 @@ public class NetworkTopology {
   */
  public void add(Node node) {
    if (node==null) return;
-    String oldTopoStr = this.toString();
    if( node instanceof InnerNode ) {
      throw new IllegalArgumentException(
        "Not allow to add an inner node: "+NodeBase.getPath(node));
@@ -355,19 +345,6 @@ public class NetworkTopology {
        if (rack == null) {
          numOfRacks++;
        }
-        if (!(node instanceof InnerNode)) {
-          if (depthOfAllLeaves == -1) {
-            depthOfAllLeaves = node.getLevel();
-          } else {
-            if (depthOfAllLeaves != node.getLevel()) {
-              LOG.error("Error: can't add leaf node at depth " +
-                  node.getLevel() + " to topology:\n" + oldTopoStr);
-              throw new InvalidTopologyException("Invalid network topology. " +
-                  "You cannot have a rack and a non-rack node at the same " +
-                  "level of the network topology.");
-            }
-          }
-        }
      }
      if(LOG.isDebugEnabled()) {
        LOG.debug("NetworkTopology became:\n" + this.toString());
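With the leaf-depth validation removed by this revert, add() rejects only InnerNode arguments; a topology mixing rack and non-rack nodes at one level is silently accepted. A sketch with placeholder paths:

    NetworkTopology topo = new NetworkTopology();
    topo.add(new NodeBase("/rack1/host1"));
    topo.add(new NodeBase("/rack2/host2"));
    // a mismatched-depth add, e.g. new NodeBase("/host3"), is no longer detected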

+ 0 - 147
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/net/TableMapping.java

@@ -1,147 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.net;
-
-import static org.apache.hadoop.fs.CommonConfigurationKeysPublic.NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY;
-
-import java.io.BufferedReader;
-import java.io.FileReader;
-import java.io.IOException;
-import java.util.ArrayList;
-import java.util.HashMap;
-import java.util.List;
-import java.util.Map;
-
-import org.apache.commons.lang.StringUtils;
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configuration;
-import org.apache.hadoop.conf.Configured;
-
-/**
- * <p>
- * Simple {@link DNSToSwitchMapping} implementation that reads a 2 column text
- * file. The columns are separated by whitespace. The first column is a DNS or
- * IP address and the second column specifies the rack where the address maps.
- * </p>
- * <p>
- * This class uses the configuration parameter {@code
- * net.topology.table.file.name} to locate the mapping file.
- * </p>
- * <p>
- * Calls to {@link #resolve(List)} will look up the address as defined in the
- * mapping file. If no entry corresponding to the address is found, the value
- * {@code /default-rack} is returned.
- * </p>
- */
-@InterfaceAudience.Public
-@InterfaceStability.Evolving
-public class TableMapping extends CachedDNSToSwitchMapping {
-
-  private static final Log LOG = LogFactory.getLog(TableMapping.class);
-  
-  public TableMapping() {
-    super(new RawTableMapping());
-  }
-  
-  private RawTableMapping getRawMapping() {
-    return (RawTableMapping) rawMapping;
-  }
-
-  @Override
-  public Configuration getConf() {
-    return getRawMapping().getConf();
-  }
-
-  @Override
-  public void setConf(Configuration conf) {
-    super.setConf(conf);
-    getRawMapping().setConf(conf);
-  }
-  
-  private static final class RawTableMapping extends Configured
-      implements DNSToSwitchMapping {
-    
-    private final Map<String, String> map = new HashMap<String, String>();
-    private boolean initialized = false;
-  
-    private synchronized void load() {
-      map.clear();
-  
-      String filename = getConf().get(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY, null);
-      if (StringUtils.isBlank(filename)) {
-        LOG.warn(NET_TOPOLOGY_TABLE_MAPPING_FILE_KEY + " not configured. "
-            + NetworkTopology.DEFAULT_RACK + " will be returned.");
-        return;
-      }
-  
-      BufferedReader reader = null;
-      try {
-        reader = new BufferedReader(new FileReader(filename));
-        String line = reader.readLine();
-        while (line != null) {
-          line = line.trim();
-          if (line.length() != 0 && line.charAt(0) != '#') {
-            String[] columns = line.split("\\s+");
-            if (columns.length == 2) {
-              map.put(columns[0], columns[1]);
-            } else {
-              LOG.warn("Line does not have two columns. Ignoring. " + line);
-            }
-          }
-          line = reader.readLine();
-        }
-      } catch (Exception e) {
-        LOG.warn(filename + " cannot be read. " + NetworkTopology.DEFAULT_RACK
-            + " will be returned.", e);
-        map.clear();
-      } finally {
-        if (reader != null) {
-          try {
-            reader.close();
-          } catch (IOException e) {
-            LOG.warn(filename + " cannot be read. "
-                + NetworkTopology.DEFAULT_RACK + " will be returned.", e);
-            map.clear();
-          }
-        }
-      }
-    }
-  
-    public synchronized List<String> resolve(List<String> names) {
-      if (!initialized) {
-        initialized = true;
-        load();
-      }
-  
-      List<String> results = new ArrayList<String>(names.size());
-      for (String name : names) {
-        String result = map.get(name);
-        if (result != null) {
-          results.add(result);
-        } else {
-          results.add(NetworkTopology.DEFAULT_RACK);
-        }
-      }
-      return results;
-    }
-    
-  }
-}
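For reference, the deleted TableMapping resolved racks from a two-column whitespace-separated file, as its javadoc above describes; a hedged sketch of how it was wired before removal (the file path is a placeholder, and the config key values are assumed from the constants imported above):

    Configuration conf = new Configuration();
    conf.set("net.topology.table.file.name", "/etc/hadoop/topology.tbl");
    conf.setClass("net.topology.node.switch.mapping.impl",
        TableMapping.class, DNSToSwitchMapping.class);
    // topology.tbl lines: "<host-or-ip> <rack>", e.g.
    //   192.168.1.10        /rack1
    //   host2.example.com   /rack2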

+ 1 - 40
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/AuthenticationFilterInitializer.java

@@ -17,16 +17,11 @@
 */
package org.apache.hadoop.security;

-import org.apache.hadoop.http.HttpServer;
import org.apache.hadoop.security.authentication.server.AuthenticationFilter;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.http.FilterContainer;
import org.apache.hadoop.http.FilterInitializer;
-import org.apache.hadoop.security.authentication.server.KerberosAuthenticationHandler;

-import java.io.FileReader;
-import java.io.IOException;
-import java.io.Reader;
import java.util.HashMap;
import java.util.Map;

@@ -45,9 +40,7 @@ import java.util.Map;
 */
public class AuthenticationFilterInitializer extends FilterInitializer {

-  static final String PREFIX = "hadoop.http.authentication.";
-
-  static final String SIGNATURE_SECRET_FILE = AuthenticationFilter.SIGNATURE_SECRET + ".file";
+  private static final String PREFIX = "hadoop.http.authentication.";

  /**
   * Initializes hadoop-auth AuthenticationFilter.
@@ -74,38 +67,6 @@ public class AuthenticationFilterInitializer extends FilterInitializer {
       }
     }
 
-    String signatureSecretFile = filterConfig.get(SIGNATURE_SECRET_FILE);
-    if (signatureSecretFile == null) {
-      throw new RuntimeException("Undefined property: " + SIGNATURE_SECRET_FILE);      
-    }
-    
-    try {
-      StringBuilder secret = new StringBuilder();
-      Reader reader = new FileReader(signatureSecretFile);
-      int c = reader.read();
-      while (c > -1) {
-        secret.append((char)c);
-        c = reader.read();
-      }
-      reader.close();
-      filterConfig.put(AuthenticationFilter.SIGNATURE_SECRET, secret.toString());
-    } catch (IOException ex) {
-      throw new RuntimeException("Could not read HTTP signature secret file: " + signatureSecretFile);            
-    }
-
-    //Resolve _HOST into bind address
-    String bindAddress = conf.get(HttpServer.BIND_ADDRESS);
-    String principal = filterConfig.get(KerberosAuthenticationHandler.PRINCIPAL);
-    if (principal != null) {
-      try {
-        principal = SecurityUtil.getServerPrincipal(principal, bindAddress);
-      }
-      catch (IOException ex) {
-        throw new RuntimeException("Could not resolve Kerberos principal name: " + ex.toString(), ex);
-      }
-      filterConfig.put(KerberosAuthenticationHandler.PRINCIPAL, principal);
-    }
-
     container.addFilter("authentication",
                         AuthenticationFilter.class.getName(),
                         filterConfig);
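
The block removed in this hunk loaded the signature secret from the file named by hadoop.http.authentication.signature.secret.file and expanded the _HOST token in the configured Kerberos principal. A minimal sketch of that _HOST expansion, using SecurityUtil.getServerPrincipal exactly as the deleted lines did (the principal and bind address are hypothetical):

    import org.apache.hadoop.security.SecurityUtil;

    public class ServerPrincipalSketch {
      public static void main(String[] args) throws Exception {
        // With a 0.0.0.0 bind address, _HOST is replaced by the canonical
        // name of the local host, mirroring what the removed filter
        // initialization performed before registering the handler.
        String principal = SecurityUtil.getServerPrincipal(
            "HTTP/_HOST@EXAMPLE.COM", "0.0.0.0");
        System.out.println(principal);
      }
    }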

+ 0 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Groups.java

@@ -86,9 +86,6 @@ public class Groups {
     
     // Create and cache user's groups
     groups = new CachedGroups(impl.getGroups(user));
-    if (groups.getGroups().isEmpty()) {
-      throw new IOException("No groups found for user " + user);
-    }
     userToGroupsMap.put(user, groups);
     if(LOG.isDebugEnabled()) {
       LOG.debug("Returning fetched groups for '" + user + "'");

+ 5 - 3
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/HadoopKerberosName.java

@@ -24,7 +24,9 @@ import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
+
+import sun.security.krb5.Config;
+import sun.security.krb5.KrbException;
 
 /**
  * This class implements parsing and handling of Kerberos principal names. In 
@@ -38,8 +40,8 @@ public class HadoopKerberosName extends KerberosName {
 
   static {
     try {
-      KerberosUtil.getDefaultRealm();
-    } catch (Exception ke) {
+      Config.getInstance().getDefaultRealm();
+    } catch (KrbException ke) {
       if(UserGroupInformation.isSecurityEnabled())
         throw new IllegalArgumentException("Can't get Kerberos configuration",ke);
     }
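
Whichever way the default realm is probed, the class itself maps full principals to short names through hadoop.security.auth_to_local rules. A brief usage sketch (the principal shown is hypothetical):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.HadoopKerberosName;

    public class ShortNameSketch {
      public static void main(String[] args) throws Exception {
        // Loads the auth_to_local rules from the configuration.
        HadoopKerberosName.setConfiguration(new Configuration());
        HadoopKerberosName name =
            new HadoopKerberosName("nn/host1.example.com@EXAMPLE.COM");
        // With the default rules this yields the first component, e.g. "nn".
        System.out.println(name.getShortName());
      }
    }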

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/Krb5AndCertsSslSocketConnector.java

@@ -58,7 +58,7 @@ public class Krb5AndCertsSslSocketConnector extends SslSocketConnector {
     Collections.unmodifiableList(Collections.singletonList(
           "TLS_KRB5_WITH_3DES_EDE_CBC_SHA"));
   static {
-    SecurityUtil.initKrb5CipherSuites();
+    System.setProperty("https.cipherSuites", KRB5_CIPHER_SUITES.get(0));
   }
   
   private static final Log LOG = LogFactory

+ 0 - 321
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/LdapGroupsMapping.java

@@ -1,321 +0,0 @@
-/**
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.security;
-
-import java.io.FileReader;
-import java.io.IOException;
-import java.io.Reader;
-import java.util.ArrayList;
-import java.util.Hashtable;
-import java.util.List;
-
-import javax.naming.Context;
-import javax.naming.NamingEnumeration;
-import javax.naming.NamingException;
-import javax.naming.directory.Attribute;
-import javax.naming.directory.DirContext;
-import javax.naming.directory.InitialDirContext;
-import javax.naming.directory.SearchControls;
-import javax.naming.directory.SearchResult;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-import org.apache.hadoop.classification.InterfaceAudience;
-import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.conf.Configurable;
-import org.apache.hadoop.conf.Configuration;
-
-/**
- * An implementation of {@link GroupMappingServiceProvider} which
- * connects directly to an LDAP server for determining group membership.
- * 
- * This provider should be used only if it is necessary to map users to
- * groups that reside exclusively in an Active Directory or LDAP installation.
- * The common case for a Hadoop installation is that LDAP users and groups are
- * materialized on the Unix servers, and for an installation like that,
- * ShellBasedUnixGroupsMapping is preferred. However, in cases where
- * those users and groups aren't materialized in Unix, but need to be used for
- * access control, this class may be used to communicate directly with the LDAP
- * server.
- * 
- * It is important to note that resolving group mappings will incur network
- * traffic, and may cause degraded performance, although user-group mappings
- * will be cached via the infrastructure provided by {@link Groups}.
- * 
- * This implementation does not support configurable search limits. If a filter
- * is used for searching users or groups which returns more results than are
- * allowed by the server, an exception will be thrown.
- * 
- * The implementation also does not attempt to resolve group hierarchies. In
- * order to be considered a member of a group, the user must be an explicit
- * member in LDAP.
- */
-@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
-@InterfaceStability.Evolving
-public class LdapGroupsMapping
-    implements GroupMappingServiceProvider, Configurable {
-  
-  public static final String LDAP_CONFIG_PREFIX = "hadoop.security.group.mapping.ldap";
-
-  /*
-   * URL of the LDAP server
-   */
-  public static final String LDAP_URL_KEY = LDAP_CONFIG_PREFIX + ".url";
-  public static final String LDAP_URL_DEFAULT = "";
-
-  /*
-   * Should SSL be used to connect to the server
-   */
-  public static final String LDAP_USE_SSL_KEY = LDAP_CONFIG_PREFIX + ".ssl";
-  public static final Boolean LDAP_USE_SSL_DEFAULT = false;
-
-  /*
-   * File path to the location of the SSL keystore to use
-   */
-  public static final String LDAP_KEYSTORE_KEY = LDAP_CONFIG_PREFIX + ".ssl.keystore";
-  public static final String LDAP_KEYSTORE_DEFAULT = "";
-
-  /*
-   * Password for the keystore
-   */
-  public static final String LDAP_KEYSTORE_PASSWORD_KEY = LDAP_CONFIG_PREFIX + ".ssl.keystore.password";
-  public static final String LDAP_KEYSTORE_PASSWORD_DEFAULT = "";
-  
-  public static final String LDAP_KEYSTORE_PASSWORD_FILE_KEY = LDAP_KEYSTORE_PASSWORD_KEY + ".file";
-  public static final String LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT = "";
-
-  /*
-   * User to bind to the LDAP server with
-   */
-  public static final String BIND_USER_KEY = LDAP_CONFIG_PREFIX + ".bind.user";
-  public static final String BIND_USER_DEFAULT = "";
-
-  /*
-   * Password for the bind user
-   */
-  public static final String BIND_PASSWORD_KEY = LDAP_CONFIG_PREFIX + ".bind.password";
-  public static final String BIND_PASSWORD_DEFAULT = "";
-  
-  public static final String BIND_PASSWORD_FILE_KEY = BIND_PASSWORD_KEY + ".file";
-  public static final String BIND_PASSWORD_FILE_DEFAULT = "";
-
-  /*
-   * Base distinguished name to use for searches
-   */
-  public static final String BASE_DN_KEY = LDAP_CONFIG_PREFIX + ".base";
-  public static final String BASE_DN_DEFAULT = "";
-
-  /*
-   * Any additional filters to apply when searching for users
-   */
-  public static final String USER_SEARCH_FILTER_KEY = LDAP_CONFIG_PREFIX + ".search.filter.user";
-  public static final String USER_SEARCH_FILTER_DEFAULT = "(&(objectClass=user)(sAMAccountName={0}))";
-
-  /*
-   * Any additional filters to apply when finding relevant groups
-   */
-  public static final String GROUP_SEARCH_FILTER_KEY = LDAP_CONFIG_PREFIX + ".search.filter.group";
-  public static final String GROUP_SEARCH_FILTER_DEFAULT = "(objectClass=group)";
-
-  /*
-   * LDAP attribute to use for determining group membership
-   */
-  public static final String GROUP_MEMBERSHIP_ATTR_KEY = LDAP_CONFIG_PREFIX + ".search.attr.member";
-  public static final String GROUP_MEMBERSHIP_ATTR_DEFAULT = "member";
-
-  /*
-   * LDAP attribute to use for identifying a group's name
-   */
-  public static final String GROUP_NAME_ATTR_KEY = LDAP_CONFIG_PREFIX + ".search.attr.group.name";
-  public static final String GROUP_NAME_ATTR_DEFAULT = "cn";
-  
-  private static final Log LOG = LogFactory.getLog(LdapGroupsMapping.class);
-
-  private static final SearchControls SEARCH_CONTROLS = new SearchControls();
-  static {
-    SEARCH_CONTROLS.setSearchScope(SearchControls.SUBTREE_SCOPE);
-  }
-
-  private DirContext ctx;
-  private Configuration conf;
-  
-  private String ldapUrl;
-  private boolean useSsl;
-  private String keystore;
-  private String keystorePass;
-  private String bindUser;
-  private String bindPassword;
-  private String baseDN;
-  private String groupSearchFilter;
-  private String userSearchFilter;
-  private String groupMemberAttr;
-  private String groupNameAttr;
-
-  /**
-   * Returns list of groups for a user.
-   * 
-   * The LdapCtx which underlies the DirContext object is not thread-safe, so
-   * we need to block around this whole method. The caching infrastructure will
-   * ensure that performance stays in an acceptable range.
-   *
-   * @param user get groups for this user
-   * @return list of groups for a given user
-   */
-  @Override
-  public synchronized List<String> getGroups(String user) throws IOException {
-    List<String> groups = new ArrayList<String>();
-
-    try {
-      DirContext ctx = getDirContext();
-
-      // Search for the user. We'll only ever need to look at the first result
-      NamingEnumeration<SearchResult> results = ctx.search(baseDN,
-                                                           userSearchFilter,
-                                                           new Object[]{user},
-                                                           SEARCH_CONTROLS);
-      if (results.hasMoreElements()) {
-        SearchResult result = results.nextElement();
-        String userDn = result.getNameInNamespace();
-
-        NamingEnumeration<SearchResult> groupResults =
-          ctx.search(baseDN,
-                     "(&" + groupSearchFilter + "(" + groupMemberAttr + "={0}))",
-                     new Object[]{userDn},
-                     SEARCH_CONTROLS);
-        while (groupResults.hasMoreElements()) {
-          SearchResult groupResult = groupResults.nextElement();
-          Attribute groupName = groupResult.getAttributes().get(groupNameAttr);
-          groups.add(groupName.get().toString());
-        }
-      }
-    } catch (NamingException e) {
-      LOG.warn("Exception trying to get groups for user " + user, e);
-      return new ArrayList<String>();
-    }
-
-    return groups;
-  }
-
-  @SuppressWarnings("deprecation")
-  DirContext getDirContext() throws NamingException {
-    if (ctx == null) {
-      // Set up the initial environment for LDAP connectivity
-      Hashtable<String, String> env = new Hashtable<String, String>();
-      env.put(Context.INITIAL_CONTEXT_FACTORY,
-          com.sun.jndi.ldap.LdapCtxFactory.class.getName());
-      env.put(Context.PROVIDER_URL, ldapUrl);
-      env.put(Context.SECURITY_AUTHENTICATION, "simple");
-
-      // Set up SSL security, if necessary
-      if (useSsl) {
-        env.put(Context.SECURITY_PROTOCOL, "ssl");
-        System.setProperty("javax.net.ssl.keyStore", keystore);
-        System.setProperty("javax.net.ssl.keyStorePassword", keystorePass);
-      }
-
-      env.put(Context.SECURITY_PRINCIPAL, bindUser);
-      env.put(Context.SECURITY_CREDENTIALS, bindPassword);
-
-      ctx = new InitialDirContext(env);
-    }
-
-    return ctx;
-  }
-
-  /**
-   * Caches groups, no need to do that for this provider
-   */
-  @Override
-  public void cacheGroupsRefresh() throws IOException {
-    // does nothing in this provider of user to groups mapping
-  }
-
-  /** 
-   * Adds groups to cache, no need to do that for this provider
-   *
-   * @param groups unused
-   */
-  @Override
-  public void cacheGroupsAdd(List<String> groups) throws IOException {
-    // does nothing in this provider of user to groups mapping
-  }
-
-  @Override
-  public synchronized Configuration getConf() {
-    return conf;
-  }
-
-  @Override
-  public synchronized void setConf(Configuration conf) {
-    ldapUrl = conf.get(LDAP_URL_KEY, LDAP_URL_DEFAULT);
-    if (ldapUrl == null || ldapUrl.isEmpty()) {
-      throw new RuntimeException("LDAP URL is not configured");
-    }
-    
-    useSsl = conf.getBoolean(LDAP_USE_SSL_KEY, LDAP_USE_SSL_DEFAULT);
-    keystore = conf.get(LDAP_KEYSTORE_KEY, LDAP_KEYSTORE_DEFAULT);
-    
-    keystorePass =
-        conf.get(LDAP_KEYSTORE_PASSWORD_KEY, LDAP_KEYSTORE_PASSWORD_DEFAULT);
-    if (keystorePass.isEmpty()) {
-      keystorePass = extractPassword(
-        conf.get(LDAP_KEYSTORE_PASSWORD_FILE_KEY, LDAP_KEYSTORE_PASSWORD_FILE_DEFAULT));
-    }
-    
-    bindUser = conf.get(BIND_USER_KEY, BIND_USER_DEFAULT);
-    bindPassword = conf.get(BIND_PASSWORD_KEY, BIND_PASSWORD_DEFAULT);
-    if (bindPassword.isEmpty()) {
-      bindPassword = extractPassword(
-          conf.get(BIND_PASSWORD_FILE_KEY, BIND_PASSWORD_FILE_DEFAULT));
-    }
-    
-    baseDN = conf.get(BASE_DN_KEY, BASE_DN_DEFAULT);
-    groupSearchFilter =
-        conf.get(GROUP_SEARCH_FILTER_KEY, GROUP_SEARCH_FILTER_DEFAULT);
-    userSearchFilter =
-        conf.get(USER_SEARCH_FILTER_KEY, USER_SEARCH_FILTER_DEFAULT);
-    groupMemberAttr =
-        conf.get(GROUP_MEMBERSHIP_ATTR_KEY, GROUP_MEMBERSHIP_ATTR_DEFAULT);
-    groupNameAttr =
-        conf.get(GROUP_NAME_ATTR_KEY, GROUP_NAME_ATTR_DEFAULT);
-
-    this.conf = conf;
-  }
-  
-  String extractPassword(String pwFile) {
-    if (pwFile.isEmpty()) {
-      // If there is no password file defined, we'll assume that we should do
-      // an anonymous bind
-      return "";
-    }
-    
-    try {
-      StringBuilder password = new StringBuilder();
-      Reader reader = new FileReader(pwFile);
-      int c = reader.read();
-      while (c > -1) {
-        password.append((char)c);
-        c = reader.read();
-      }
-      reader.close();
-      return password.toString();
-    } catch (IOException ioe) {
-      throw new RuntimeException("Could not read password file: " + pwFile, ioe);
-    }
-  }
-}
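
The provider deleted above is driven entirely by the hadoop.security.group.mapping.ldap.* keys it defines. A minimal sketch of wiring it up directly, assuming the class as it existed before this removal (server URL, bind DN, password, and user are hypothetical; in a real deployment these keys live in core-site.xml and the provider is selected via hadoop.security.group.mapping):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.security.LdapGroupsMapping;

    public class LdapGroupsSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        conf.set(LdapGroupsMapping.LDAP_URL_KEY, "ldap://ad.example.com:389");
        conf.set(LdapGroupsMapping.BIND_USER_KEY,
            "cn=hadoop,ou=services,dc=example,dc=com");
        conf.set(LdapGroupsMapping.BIND_PASSWORD_KEY, "secret");
        conf.set(LdapGroupsMapping.BASE_DN_KEY, "dc=example,dc=com");

        LdapGroupsMapping mapping = new LdapGroupsMapping();
        mapping.setConf(conf); // fails fast if the LDAP URL is missing

        // Returns the groups the user is an explicit member of; on a
        // NamingException it logs a warning and returns an empty list.
        System.out.println(mapping.getGroups("jdoe"));
      }
    }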

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/RefreshUserMappingsProtocol.java

@@ -33,7 +33,7 @@ import org.apache.hadoop.security.KerberosInfo;
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public interface RefreshUserMappingsProtocol {
+public interface RefreshUserMappingsProtocol extends VersionedProtocol {
   
   /**
    * Version 1: Initial version.

+ 11 - 75
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/SecurityUtil.java

@@ -17,17 +17,12 @@
 package org.apache.hadoop.security;
 
 import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.Field;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
 import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.URI;
 import java.net.URL;
 import java.net.UnknownHostException;
 import java.security.AccessController;
-import java.security.PrivilegedAction;
 import java.util.Arrays;
 import java.util.List;
 import java.util.ServiceLoader;
@@ -53,6 +48,9 @@ import com.google.common.annotations.VisibleForTesting;
 //this will need to be replaced someday when there is a suitable replacement
 import sun.net.dns.ResolverConfiguration;
 import sun.net.util.IPAddressUtil;
+import sun.security.jgss.krb5.Krb5Util;
+import sun.security.krb5.Credentials;
+import sun.security.krb5.PrincipalName;
 
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
@@ -156,41 +154,12 @@ public class SecurityUtil {
     String serviceName = "host/" + remoteHost.getHost();
     if (LOG.isDebugEnabled())
       LOG.debug("Fetching service ticket for host at: " + serviceName);
-    Object serviceCred = null;
-    Method credsToTicketMeth;
-    Class<?> krb5utilClass;
+    Credentials serviceCred = null;
     try {
-      Class<?> principalClass;
-      Class<?> credentialsClass;
-      
-      if (System.getProperty("java.vendor").contains("IBM")) {
-        principalClass = Class.forName("com.ibm.security.krb5.PrincipalName");
-        
-        credentialsClass = Class.forName("com.ibm.security.krb5.Credentials");
-        krb5utilClass = Class.forName("com.ibm.security.jgss.mech.krb5");
-      } else {
-        principalClass = Class.forName("sun.security.krb5.PrincipalName");
-        credentialsClass = Class.forName("sun.security.krb5.Credentials");
-        krb5utilClass = Class.forName("sun.security.jgss.krb5");
-      }
-      @SuppressWarnings("rawtypes")
-      Constructor principalConstructor = principalClass.getConstructor(String.class, 
-          int.class);
-      Field KRB_NT_SRV_HST = principalClass.getDeclaredField("KRB_NT_SRV_HST");
-      Method acquireServiceCredsMeth = 
-          credentialsClass.getDeclaredMethod("acquireServiceCreds", 
-              String.class, credentialsClass);
-      Method ticketToCredsMeth = krb5utilClass.getDeclaredMethod("ticketToCreds", 
-          KerberosTicket.class);
-      credsToTicketMeth = krb5utilClass.getDeclaredMethod("credsToTicket", 
-          credentialsClass);
-      
-      Object principal = principalConstructor.newInstance(serviceName,
-          KRB_NT_SRV_HST.get(principalClass));
-      
-      serviceCred = acquireServiceCredsMeth.invoke(credentialsClass, 
-          principal.toString(), 
-          ticketToCredsMeth.invoke(krb5utilClass, getTgtFromSubject()));
+      PrincipalName principal = new PrincipalName(serviceName,
+          PrincipalName.KRB_NT_SRV_HST);
+      serviceCred = Credentials.acquireServiceCreds(principal
+          .toString(), Krb5Util.ticketToCreds(getTgtFromSubject()));
     } catch (Exception e) {
       throw new IOException("Can't get service ticket for: "
           + serviceName, e);
@@ -198,13 +167,8 @@
     if (serviceCred == null) {
       throw new IOException("Can't get service ticket for " + serviceName);
     }
-    try {
-      Subject.getSubject(AccessController.getContext()).getPrivateCredentials()
-          .add(credsToTicketMeth.invoke(krb5utilClass, serviceCred));
-    } catch (Exception e) {
-      throw new IOException("Can't get service ticket for: "
-          + serviceName, e);
-    }
+    Subject.getSubject(AccessController.getContext()).getPrivateCredentials()
+        .add(Krb5Util.credsToTicket(serviceCred));
   }
   
   /**
@@ -484,27 +448,6 @@
     return buildTokenService(NetUtils.createSocketAddr(uri.getAuthority()));
   }
   
-  /**
-   * Perform the given action as the daemon's login user. If the login
-   * user cannot be determined, this will log a FATAL error and exit
-   * the whole JVM.
-   */
-  public static <T> T doAsLoginUserOrFatal(PrivilegedAction<T> action) { 
-    if (UserGroupInformation.isSecurityEnabled()) {
-      UserGroupInformation ugi = null;
-      try { 
-        ugi = UserGroupInformation.getLoginUser();
-      } catch (IOException e) {
-        LOG.fatal("Exception while getting login user", e);
-        e.printStackTrace();
-        Runtime.getRuntime().exit(-1);
-      }
-      return ugi.doAs(action);
-    } else {
-      return action.run();
-    }
-  }
-
   /**
    * Resolves a host subject to the security requirements determined by
    * hadoop.security.token.service.use_ip.
@@ -654,12 +597,5 @@
     void setSearchDomains(String ... domains) {
       searchDomains = Arrays.asList(domains);
     }
-  }
-
-  public static void initKrb5CipherSuites() {
-    if (UserGroupInformation.isSecurityEnabled()) {
-      System.setProperty("https.cipherSuites",
-          Krb5AndCertsSslSocketConnector.KRB5_CIPHER_SUITES.get(0));
-    }
-  }
+  }  
 }
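
Among the deletions here is doAsLoginUserOrFatal, which ran a PrivilegedAction as the daemon's login user when security was enabled and inline otherwise. A sketch of how a caller would have used it (the action body is hypothetical):

    import java.security.PrivilegedAction;
    import org.apache.hadoop.security.SecurityUtil;

    public class DoAsLoginUserSketch {
      public static void main(String[] args) {
        // Under Kerberos this runs as the daemon's login UGI; without
        // security it simply invokes run() on the calling thread.
        String user = SecurityUtil.doAsLoginUserOrFatal(
            new PrivilegedAction<String>() {
              @Override
              public String run() {
                return System.getProperty("user.name");
              }
            });
        System.out.println(user);
      }
    }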

+ 15 - 43
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/UserGroupInformation.java

@@ -58,11 +58,14 @@ import org.apache.hadoop.metrics2.annotation.Metrics;
 import org.apache.hadoop.metrics2.lib.DefaultMetricsSystem;
 import org.apache.hadoop.metrics2.lib.MutableRate;
 import org.apache.hadoop.security.authentication.util.KerberosName;
-import org.apache.hadoop.security.authentication.util.KerberosUtil;
 import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.TokenIdentifier;
 import org.apache.hadoop.util.Shell;
 
+import com.sun.security.auth.NTUserPrincipal;
+import com.sun.security.auth.UnixPrincipal;
+import com.sun.security.auth.module.Krb5LoginModule;
+
 /**
  * User and group information for Hadoop.
  * This class wraps around a JAAS Subject and provides methods to determine the
@@ -286,51 +289,20 @@ public class UserGroupInformation {
   private final boolean isKeytab;
   private final boolean isKrbTkt;
   
-  private static String OS_LOGIN_MODULE_NAME;
-  private static Class<? extends Principal> OS_PRINCIPAL_CLASS;
+  private static final String OS_LOGIN_MODULE_NAME;
+  private static final Class<? extends Principal> OS_PRINCIPAL_CLASS;
   private static final boolean windows = 
                            System.getProperty("os.name").startsWith("Windows");
-  /* Return the OS login module class name */
-  private static String getOSLoginModuleName() {
-    if (System.getProperty("java.vendor").contains("IBM")) {
-      return windows ? "com.ibm.security.auth.module.NTLoginModule"
-       : "com.ibm.security.auth.module.LinuxLoginModule";
+  static {
+    if (windows) {
+      OS_LOGIN_MODULE_NAME = "com.sun.security.auth.module.NTLoginModule";
+      OS_PRINCIPAL_CLASS = NTUserPrincipal.class;
     } else {
-      return windows ? "com.sun.security.auth.module.NTLoginModule"
-        : "com.sun.security.auth.module.UnixLoginModule";
+      OS_LOGIN_MODULE_NAME = "com.sun.security.auth.module.UnixLoginModule";
+      OS_PRINCIPAL_CLASS = UnixPrincipal.class;
     }
   }
-
-  /* Return the OS principal class */
-  @SuppressWarnings("unchecked")
-  private static Class<? extends Principal> getOsPrincipalClass() {
-    ClassLoader cl = ClassLoader.getSystemClassLoader();
-    try {
-      if (System.getProperty("java.vendor").contains("IBM")) {
-        if (windows) {
-          return (Class<? extends Principal>)
-            cl.loadClass("com.ibm.security.auth.UsernamePrincipal");
-        } else {
-          return (Class<? extends Principal>)
-            (System.getProperty("os.arch").contains("64")
-             ? cl.loadClass("com.ibm.security.auth.UsernamePrincipal")
-             : cl.loadClass("com.ibm.security.auth.LinuxPrincipal"));
-        }
-      } else {
-        return (Class<? extends Principal>) (windows
-           ? cl.loadClass("com.sun.security.auth.NTUserPrincipal")
-           : cl.loadClass("com.sun.security.auth.UnixPrincipal"));
-      }
-    } catch (ClassNotFoundException e) {
-      LOG.error("Unable to find JAAS classes:" + e.getMessage());
-    }
-    return null;
-  }
-  static {
-    OS_LOGIN_MODULE_NAME = getOSLoginModuleName();
-    OS_PRINCIPAL_CLASS = getOsPrincipalClass();
-  }
-
+  
   private static class RealUser implements Principal {
     private final UserGroupInformation realUser;
     
@@ -410,7 +382,7 @@ public class UserGroupInformation {
       USER_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);
     }
     private static final AppConfigurationEntry USER_KERBEROS_LOGIN =
-      new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+      new AppConfigurationEntry(Krb5LoginModule.class.getName(),
                                 LoginModuleControlFlag.OPTIONAL,
                                 USER_KERBEROS_OPTIONS);
     private static final Map<String,String> KEYTAB_KERBEROS_OPTIONS = 
@@ -423,7 +395,7 @@
       KEYTAB_KERBEROS_OPTIONS.putAll(BASIC_JAAS_OPTIONS);      
     }
     private static final AppConfigurationEntry KEYTAB_KERBEROS_LOGIN =
-      new AppConfigurationEntry(KerberosUtil.getKrb5LoginModuleName(),
+      new AppConfigurationEntry(Krb5LoginModule.class.getName(),
                                 LoginModuleControlFlag.REQUIRED,
                                 KEYTAB_KERBEROS_OPTIONS);
     

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/authorize/RefreshAuthorizationPolicyProtocol.java

@@ -32,7 +32,7 @@ import org.apache.hadoop.security.KerberosInfo;
     serverPrincipal=CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY)
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public interface RefreshAuthorizationPolicyProtocol {
+public interface RefreshAuthorizationPolicyProtocol extends VersionedProtocol {
   
   /**
    * Version 1: Initial version

+ 0 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/SecretManager.java

@@ -29,7 +29,6 @@ import javax.crypto.spec.SecretKeySpec;
 
 import org.apache.hadoop.classification.InterfaceAudience;
 import org.apache.hadoop.classification.InterfaceStability;
-import org.apache.hadoop.ipc.StandbyException;
 
 
 /**
@@ -73,17 +72,6 @@ public abstract class SecretManager<T extends TokenIdentifier> {
    * @return the newly created empty token identifier
    */
   public abstract T createIdentifier();
-
-  /**
-   * No-op if the secret manager is available for reading tokens, throw a
-   * StandbyException otherwise.
-   * 
-   * @throws StandbyException if the secret manager is not available to read
-   *         tokens
-   */
-  public void checkAvailableForRead() throws StandbyException {
-    // Default to being available for read.
-  }
   
   /**
    * The name of the hashing algorithm.

+ 3 - 21
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/AbstractDelegationTokenSecretManager.java

@@ -40,8 +40,6 @@ import org.apache.hadoop.security.token.Token;
 import org.apache.hadoop.security.token.SecretManager;
 import org.apache.hadoop.util.Daemon;
 
-import com.google.common.base.Preconditions;
-
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
 public abstract 
@@ -86,12 +84,6 @@ extends AbstractDelegationTokenIdentifier>
   private Thread tokenRemoverThread;
   protected volatile boolean running;
 
-  /**
-   * If the delegation token update thread holds this lock, it will
-   * not get interrupted.
-   */
-  protected Object noInterruptsLock = new Object();
-
   public AbstractDelegationTokenSecretManager(long delegationKeyUpdateInterval,
       long delegationTokenMaxLifetime, long delegationTokenRenewInterval,
       long delegationTokenRemoverScanInterval) {
@@ -103,7 +95,6 @@ extends AbstractDelegationTokenIdentifier>
 
   /** should be called before this object is used */
   public void startThreads() throws IOException {
-    Preconditions.checkState(!running);
     updateCurrentKey();
     synchronized (this) {
       running = true;
@@ -363,21 +354,12 @@ extends AbstractDelegationTokenIdentifier>
     }
   }
 
-  public void stopThreads() {
+  public synchronized void stopThreads() {
     if (LOG.isDebugEnabled())
       LOG.debug("Stopping expired delegation token remover thread");
     running = false;
-    
     if (tokenRemoverThread != null) {
-      synchronized (noInterruptsLock) {
-        tokenRemoverThread.interrupt();
-      }
-      try {
-        tokenRemoverThread.join();
-      } catch (InterruptedException e) {
-        throw new RuntimeException(
-            "Unable to join on token removal thread", e);
-      }
+      tokenRemoverThread.interrupt();
     }
   }
   
@@ -413,7 +395,7 @@ extends AbstractDelegationTokenIdentifier>
             lastTokenCacheCleanup = now;
           }
           try {
-            Thread.sleep(Math.min(5000, keyUpdateInterval)); // 5 seconds
+            Thread.sleep(5000); // 5 seconds
           } catch (InterruptedException ie) {
             LOG
             .error("InterruptedException received for ExpiredTokenRemover thread "

+ 3 - 12
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/security/token/delegation/DelegationKey.java

@@ -42,20 +42,15 @@ public class DelegationKey implements Writable {
   @Nullable
   private byte[] keyBytes = null;
 
-  /** Default constructor required for Writable */
   public DelegationKey() {
-    this(0, 0L, (SecretKey)null);
+    this(0, 0L, null);
   }
 
   public DelegationKey(int keyId, long expiryDate, SecretKey key) {
-    this(keyId, expiryDate, key != null ? key.getEncoded() : null);
-  }
-  
-  public DelegationKey(int keyId, long expiryDate, byte[] encodedKey) {
     this.keyId = keyId;
     this.expiryDate = expiryDate;
-    if (encodedKey != null) {
-      this.keyBytes = encodedKey;
+    if (key!=null) {
+      this.keyBytes = key.getEncoded();
     }
   }
 
@@ -75,10 +70,6 @@ public class DelegationKey {
       return key;
     }
   }
-  
-  public byte[] getEncodedKey() {
-    return keyBytes;
-  }
 
   public void setExpiryDate(long expiryDate) {
     this.expiryDate = expiryDate;

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetGroupsBase.java

@@ -94,7 +94,7 @@ public abstract class GetGroupsBase extends Configured implements Tool {
    * @return A {@link GetUserMappingsProtocol} client proxy.
    * @throws IOException
    */
-  protected GetUserMappingsProtocol getUgmProtocol() throws IOException {
+  private GetUserMappingsProtocol getUgmProtocol() throws IOException {
     GetUserMappingsProtocol userGroupMappingProtocol =
       RPC.getProxy(GetUserMappingsProtocol.class, 
           GetUserMappingsProtocol.versionID,

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/tools/GetUserMappingsProtocol.java

@@ -29,7 +29,7 @@ import org.apache.hadoop.ipc.VersionedProtocol;
  */
 @InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
 @InterfaceStability.Evolving
-public interface GetUserMappingsProtocol {
+public interface GetUserMappingsProtocol extends VersionedProtocol {
   
   /**
    * Version 1: Initial version.

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/GenericOptionsParser.java

@@ -305,7 +305,7 @@ public class GenericOptionsParser {
         }
       }
     }
-    conf.setBoolean("mapreduce.client.genericoptionsparser.used", true);
+    conf.setBoolean("mapred.used.genericoptionsparser", true);
     
     // tokensFile
     if(line.hasOption("tokenCacheFile")) {
     if(line.hasOption("tokenCacheFile")) {

+ 0 - 72
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ProtoUtil.java

@@ -21,11 +21,6 @@ package org.apache.hadoop.util;
 import java.io.DataInput;
 import java.io.IOException;
 
-import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
-import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.UserInformationProto;
-import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
-import org.apache.hadoop.security.UserGroupInformation;
-
 public abstract class ProtoUtil {
 
   /**
@@ -68,71 +63,4 @@ public abstract class ProtoUtil {
     return result;
   }
 
-  
-  /** 
-   * This method creates the connection context  using exactly the same logic
-   * as the old connection context as was done for writable where
-   * the effective and real users are set based on the auth method.
-   *
-   */
-  public static IpcConnectionContextProto makeIpcConnectionContext(
-      final String protocol,
-      final UserGroupInformation ugi, final AuthMethod authMethod) {
-    IpcConnectionContextProto.Builder result = IpcConnectionContextProto.newBuilder();
-    if (protocol != null) {
-      result.setProtocol(protocol);
-    }
-    UserInformationProto.Builder ugiProto =  UserInformationProto.newBuilder();
-    if (ugi != null) {
-      /*
-       * In the connection context we send only additional user info that
-       * is not derived from the authentication done during connection setup.
-       */
-      if (authMethod == AuthMethod.KERBEROS) {
-        // Real user was established as part of the connection.
-        // Send effective user only.
-        ugiProto.setEffectiveUser(ugi.getUserName());
-      } else if (authMethod == AuthMethod.DIGEST) {
-        // With token, the connection itself establishes 
-        // both real and effective user. Hence send none in header.
-      } else {  // Simple authentication
-        // No user info is established as part of the connection.
-        // Send both effective user and real user
-        ugiProto.setEffectiveUser(ugi.getUserName());
-        if (ugi.getRealUser() != null) {
-          ugiProto.setRealUser(ugi.getRealUser().getUserName());
-        }
-      }
-    }   
-    result.setUserInfo(ugiProto);
-    return result.build();
-  }
-  
-  public static UserGroupInformation getUgi(IpcConnectionContextProto context) {
-    if (context.hasUserInfo()) {
-      UserInformationProto userInfo = context.getUserInfo();
-        return getUgi(userInfo);
-    } else {
-      return null;
-    }
-  }
-  
-  public static UserGroupInformation getUgi(UserInformationProto userInfo) {
-    UserGroupInformation ugi = null;
-    String effectiveUser = userInfo.hasEffectiveUser() ? userInfo
-        .getEffectiveUser() : null;
-    String realUser = userInfo.hasRealUser() ? userInfo.getRealUser() : null;
-    if (effectiveUser != null) {
-      if (realUser != null) {
-        UserGroupInformation realUserUgi = UserGroupInformation
-            .createRemoteUser(realUser);
-        ugi = UserGroupInformation
-            .createProxyUser(effectiveUser, realUserUgi);
-      } else {
-        ugi = org.apache.hadoop.security.UserGroupInformation
-            .createRemoteUser(effectiveUser);
-      }
-    }
-    return ugi;
-  }
 }
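
The helpers removed above built and unpacked the protobuf IPC connection context. A sketch of the client-side call under simple authentication, using the methods and types exactly as they appear in the deleted lines (the protocol name is hypothetical):

    import org.apache.hadoop.ipc.protobuf.IpcConnectionContextProtos.IpcConnectionContextProto;
    import org.apache.hadoop.security.SaslRpcServer.AuthMethod;
    import org.apache.hadoop.security.UserGroupInformation;
    import org.apache.hadoop.util.ProtoUtil;

    public class ConnectionContextSketch {
      public static void main(String[] args) throws Exception {
        UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
        // Simple auth: the effective user (and real user, when proxying)
        // travel in the header, since the connection establishes neither.
        IpcConnectionContextProto context = ProtoUtil.makeIpcConnectionContext(
            "org.example.FooProtocol", ugi, AuthMethod.SIMPLE);
        // A server would recover the caller via ProtoUtil.getUgi(context).
        System.out.println(context.getUserInfo().getEffectiveUser());
      }
    }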

+ 0 - 49
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ThreadUtil.java

@@ -1,49 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- *
- *     http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package org.apache.hadoop.util;
-
-import org.apache.commons.logging.Log;
-import org.apache.commons.logging.LogFactory;
-
-import org.apache.hadoop.classification.InterfaceStability;
-
-@InterfaceStability.Evolving
-public class ThreadUtil {
-  
-  private static final Log LOG = LogFactory.getLog(ThreadUtil.class);
-
-  /**
-   * Cause the current thread to sleep as close as possible to the provided
-   * number of milliseconds. This method will log and ignore any
-   * {@link InterruptedException} encountered.
-   * 
-   * @param millis the number of milliseconds for the current thread to sleep
-   */
-  public static void sleepAtLeastIgnoreInterrupts(long millis) {
-    long start = System.currentTimeMillis();
-    while (System.currentTimeMillis() - start < millis) {
-      long timeToSleep = millis -
-          (System.currentTimeMillis() - start);
-      try {
-        Thread.sleep(timeToSleep);
-      } catch (InterruptedException ie) {
-        LOG.warn("interrupted while sleeping", ie);
-      }
-    }
-  }
-}
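
A short usage sketch for the deleted helper: unlike a bare Thread.sleep, it keeps sleeping after an interrupt until the requested time has fully elapsed:

    import org.apache.hadoop.util.ThreadUtil;

    public class SleepSketch {
      public static void main(String[] args) {
        long start = System.currentTimeMillis();
        // Blocks for at least one second even if interrupted part-way;
        // the InterruptedException is logged and swallowed.
        ThreadUtil.sleepAtLeastIgnoreInterrupts(1000);
        System.out.println("slept " + (System.currentTimeMillis() - start) + " ms");
      }
    }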

+ 0 - 30
hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/ToolRunner.java

@@ -17,7 +17,6 @@
  */
 package org.apache.hadoop.util;
 
-import java.io.IOException;
 import java.io.PrintStream;
 
 import org.apache.hadoop.classification.InterfaceAudience;
@@ -93,33 +92,4 @@ public class ToolRunner {
     GenericOptionsParser.printGenericCommandUsage(out);
   }
   
-  
-  /**
-   * Print out a prompt to the user, and return true if the user
-   * responds with "y" or "yes". (case insensitive)
-   */
-  public static boolean confirmPrompt(String prompt) throws IOException {
-    while (true) {
-      System.err.print(prompt + " (Y or N) ");
-      StringBuilder responseBuilder = new StringBuilder();
-      while (true) {
-        int c = System.in.read();
-        if (c == -1 || c == '\r' || c == '\n') {
-          break;
-        }
-        responseBuilder.append((char)c);
-      }
-  
-      String response = responseBuilder.toString();
-      if (response.equalsIgnoreCase("y") ||
-          response.equalsIgnoreCase("yes")) {
-        return true;
-      } else if (response.equalsIgnoreCase("n") ||
-          response.equalsIgnoreCase("no")) {
-        return false;
-      }
-      System.err.println("Invalid input: " + response);
-      // else ask them again
-    }
-  }
 }
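
A sketch of how the removed confirmPrompt was meant to be called from an interactive command (the prompt text is hypothetical):

    import java.io.IOException;
    import org.apache.hadoop.util.ToolRunner;

    public class ConfirmSketch {
      public static void main(String[] args) throws IOException {
        // Re-prompts until the user answers y/yes or n/no (case-insensitive).
        if (ToolRunner.confirmPrompt("Proceed?")) {
          System.out.println("confirmed");
        } else {
          System.out.println("aborted");
        }
      }
    }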

+ 0 - 2
hadoop-common-project/hadoop-common/src/main/native/configure.ac

@@ -57,8 +57,6 @@ JNI_LDFLAGS=""
 if test $JAVA_HOME != ""
 then
   JNI_LDFLAGS="-L$JAVA_HOME/jre/lib/$OS_ARCH/server"
-  JVMSOPATH=`find $JAVA_HOME/jre/ -name libjvm.so | head -n 1`
-  JNI_LDFLAGS="$JNI_LDFLAGS -L`dirname $JVMSOPATH`"
 fi
 LDFLAGS="$LDFLAGS $JNI_LDFLAGS"
 AC_CHECK_LIB([jvm], [JNI_GetCreatedJavaVMs])

+ 1 - 1
hadoop-common-project/hadoop-common/src/main/packages/hadoop-setup-conf.sh

@@ -484,7 +484,7 @@ else
 fi
 
 #unset env vars
-unset HADOOP_CLIENT_OPTS HADOOP_NAMENODE_OPTS HADOOP_DATANODE_OPTS HADOOP_SECONDARYNAMENODE_OPTS HADOOP_JAVA_PLATFORM_OPTS
+unset HADOOP_CLIENT_OPTS HADOOP_NAMENODE_OPTS HADOOP_JOBTRACKER_OPTS HADOOP_TASKTRACKER_OPTS HADOOP_DATANODE_OPTS HADOOP_SECONDARYNAMENODE_OPTS HADOOP_JAVA_PLATFORM_OPTS
 
 if [ "${AUTOMATED}" != "1" ]; then
   echo "Setup Hadoop Configuration"

+ 2 - 4
hadoop-common-project/hadoop-common/src/main/packages/templates/conf/hadoop-env.sh

@@ -22,10 +22,6 @@
 
 # The java implementation to use.
 export JAVA_HOME=${JAVA_HOME}
-
-# The jsvc implementation to use. Jsvc is required to run secure datanodes.
-#export JSVC_HOME=${JSVC_HOME}
-
 export HADOOP_CONF_DIR=${HADOOP_CONF_DIR:-"/etc/hadoop"}
 
 # Extra Java CLASSPATH elements.  Automatically insert capacity-scheduler.
@@ -46,6 +42,8 @@ export HADOOP_OPTS="-Djava.net.preferIPv4Stack=true $HADOOP_CLIENT_OPTS"
 
 # Command specific options appended to HADOOP_OPTS when specified
 export HADOOP_NAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_NAMENODE_OPTS"
+HADOOP_JOBTRACKER_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dmapred.audit.logger=INFO,MRAUDIT -Dmapred.jobsummary.logger=INFO,JSA $HADOOP_JOBTRACKER_OPTS"
+HADOOP_TASKTRACKER_OPTS="-Dhadoop.security.logger=ERROR,console -Dmapred.audit.logger=ERROR,console $HADOOP_TASKTRACKER_OPTS"
 HADOOP_DATANODE_OPTS="-Dhadoop.security.logger=ERROR,DRFAS $HADOOP_DATANODE_OPTS"
 
 export HADOOP_SECONDARYNAMENODE_OPTS="-Dhadoop.security.logger=INFO,DRFAS -Dhdfs.audit.logger=INFO,DRFAAUDIT $HADOOP_SECONDARYNAMENODE_OPTS"

Some files were not shown because too many files changed in this diff